66 files changed, 930 insertions, 438 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index aae2282600ca..ce91560229f5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1132,9 +1132,9 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1132 | when set. | 1132 | when set. |
1133 | Format: <int> | 1133 | Format: <int> |
1134 | 1134 | ||
1135 | noaliencache [MM, NUMA] Disables the allcoation of alien caches in | 1135 | noaliencache [MM, NUMA, SLAB] Disables the allocation of alien |
1136 | the slab allocator. Saves per-node memory, but will | 1136 | caches in the slab allocator. Saves per-node memory, |
1137 | impact performance on real NUMA hardware. | 1137 | but will impact performance. |
1138 | 1138 | ||
1139 | noalign [KNL,ARM] | 1139 | noalign [KNL,ARM] |
1140 | 1140 | ||
@@ -1613,6 +1613,37 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1613 | 1613 | ||
1614 | slram= [HW,MTD] | 1614 | slram= [HW,MTD] |
1615 | 1615 | ||
1616 | slub_debug [MM, SLUB] | ||
1617 | Enabling slub_debug allows one to determine the culprit | ||
1618 | if slab objects become corrupted. Enabling slub_debug | ||
1619 | creates guard zones around objects and poisons objects | ||
1620 | when not in use. Also tracks the last alloc / free. | ||
1621 | For more information see Documentation/vm/slub.txt. | ||
1622 | |||
1623 | slub_max_order= [MM, SLUB] | ||
1624 | Determines the maximum allowed order for slabs. Setting | ||
1625 | this too high may cause fragmentation. | ||
1626 | For more information see Documentation/vm/slub.txt. | ||
1627 | |||
1628 | slub_min_objects= [MM, SLUB] | ||
1629 | The minimum objects per slab. SLUB will increase the | ||
1630 | slab order up to slub_max_order to generate a | ||
1631 | sufficiently big slab to satisfy the number of objects. | ||
1632 | The higher the number of objects the smaller the overhead | ||
1633 | of tracking slabs. | ||
1634 | For more information see Documentation/vm/slub.txt. | ||
1635 | |||
1636 | slub_min_order= [MM, SLUB] | ||
1639 | 1637 | Determines the minimum page order for slabs. Must be | ||
1640 | 1638 | lower than slub_max_order. | ||
1639 | For more information see Documentation/vm/slub.txt. | ||
1640 | |||
1641 | slub_nomerge [MM, SLUB] | ||
1642 | Disable merging of slabs of similar size. May be | ||
1643 | necessary if there is some reason to distinguish | ||
1644 | allocs to different slabs. | ||
1645 | For more information see Documentation/vm/slub.txt. | ||
1646 | |||
1616 | smart2= [HW] | 1647 | smart2= [HW] |
1617 | Format: <io1>[,<io2>[,...,<io8>]] | 1648 | Format: <io1>[,<io2>[,...,<io8>]] |
1618 | 1649 | ||
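
For illustration only (the values are arbitrary, not recommendations), the SLUB parameters documented above would be combined on the kernel command line along these lines:

    slub_debug slub_min_order=1 slub_max_order=3 slub_min_objects=8

slub_debug on its own enables full debugging for all slab caches (see Documentation/vm/slub.txt); the three order/object knobs only tune how large the underlying page allocations may become.
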
diff --git a/Documentation/networking/xfrm_sysctl.txt b/Documentation/networking/xfrm_sysctl.txt
new file mode 100644
index 000000000000..5bbd16792fe1
--- /dev/null
+++ b/Documentation/networking/xfrm_sysctl.txt
@@ -0,0 +1,4 @@ | |||
1 | /proc/sys/net/core/xfrm_* Variables: | ||
2 | |||
3 | xfrm_acq_expires - INTEGER | ||
4 | default 30 - hard timeout in seconds for acquire requests | ||
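
As a usage sketch (path and default taken from the text above), the value can be read and changed at runtime through procfs:

    # cat /proc/sys/net/core/xfrm_acq_expires
    30
    # echo 60 > /proc/sys/net/core/xfrm_acq_expires
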
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 727c8d81aeaf..1523320abd87 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -1,13 +1,9 @@ | |||
1 | Short users guide for SLUB | 1 | Short users guide for SLUB |
2 | -------------------------- | 2 | -------------------------- |
3 | 3 | ||
4 | First of all slub should transparently replace SLAB. If you enable | ||
5 | SLUB then everything should work the same (Note the word "should". | ||
6 | There is likely not much value in that word at this point). | ||
7 | |||
8 | The basic philosophy of SLUB is very different from SLAB. SLAB | 4 | The basic philosophy of SLUB is very different from SLAB. SLAB |
9 | requires rebuilding the kernel to activate debug options for all | 5 | requires rebuilding the kernel to activate debug options for all |
10 | SLABS. SLUB always includes full debugging but its off by default. | 6 | slab caches. SLUB always includes full debugging but it is off by default. |
11 | SLUB can enable debugging only for selected slabs in order to avoid | 7 | SLUB can enable debugging only for selected slabs in order to avoid |
12 | an impact on overall system performance which may make a bug more | 8 | an impact on overall system performance which may make a bug more |
13 | difficult to find. | 9 | difficult to find. |
@@ -76,13 +72,28 @@ of objects. | |||
76 | Careful with tracing: It may spew out lots of information and never stop if | 72 | Careful with tracing: It may spew out lots of information and never stop if |
77 | used on the wrong slab. | 73 | used on the wrong slab. |
78 | 74 | ||
79 | SLAB Merging | 75 | Slab merging |
80 | ------------ | 76 | ------------ |
81 | 77 | ||
82 | If no debugging is specified then SLUB may merge similar slabs together | 78 | If no debug options are specified then SLUB may merge similar slabs together |
83 | in order to reduce overhead and increase cache hotness of objects. | 79 | in order to reduce overhead and increase cache hotness of objects. |
84 | slabinfo -a displays which slabs were merged together. | 80 | slabinfo -a displays which slabs were merged together. |
85 | 81 | ||
82 | Slab validation | ||
83 | --------------- | ||
84 | |||
85 | SLUB can validate all objects if the kernel was booted with slub_debug. In | ||
86 | order to do so you must have the slabinfo tool. Then you can do | ||
87 | |||
88 | slabinfo -v | ||
89 | |||
90 | which will test all objects. Output will be generated to the syslog. | ||
91 | |||
92 | This also works in a more limited way if the kernel was booted without slub_debug. In | ||
93 | In that case slabinfo -v simply tests all reachable objects. Usually | ||
94 | these are in the cpu slabs and the partial slabs. Full slabs are not | ||
95 | tracked by SLUB in a non debug situation. | ||
96 | |||
86 | Getting more performance | 97 | Getting more performance |
87 | ------------------------ | 98 | ------------------------ |
88 | 99 | ||
@@ -91,9 +102,9 @@ list_lock once in a while to deal with partial slabs. That overhead is | |||
91 | governed by the order of the allocation for each slab. The allocations | 102 | governed by the order of the allocation for each slab. The allocations |
92 | can be influenced by kernel parameters: | 103 | can be influenced by kernel parameters: |
93 | 104 | ||
94 | slub_min_objects=x (default 8) | 105 | slub_min_objects=x (default 4) |
95 | slub_min_order=x (default 0) | 106 | slub_min_order=x (default 0) |
96 | slub_max_order=x (default 4) | 107 | slub_max_order=x (default 1) |
97 | 108 | ||
98 | slub_min_objects allows to specify how many objects must at least fit | 109 | slub_min_objects allows to specify how many objects must at least fit |
99 | into one slab in order for the allocation order to be acceptable. | 110 | into one slab in order for the allocation order to be acceptable. |
@@ -109,5 +120,107 @@ longer be checked. This is useful to avoid SLUB trying to generate | |||
109 | super large order pages to fit slub_min_objects of a slab cache with | 120 | super large order pages to fit slub_min_objects of a slab cache with |
110 | large object sizes into one high order page. | 121 | large object sizes into one high order page. |
111 | 122 | ||
112 | 123 | SLUB Debug output | |
113 | Christoph Lameter, <clameter@sgi.com>, April 10, 2007 | 124 | ----------------- |
125 | |||
126 | Here is a sample of slub debug output: | ||
127 | |||
128 | *** SLUB kmalloc-8: Redzone Active@0xc90f6d20 slab 0xc528c530 offset=3360 flags=0x400000c3 inuse=61 freelist=0xc90f6d58 | ||
129 | Bytes b4 0xc90f6d10: 00 00 00 00 00 00 00 00 5a 5a 5a 5a 5a 5a 5a 5a ........ZZZZZZZZ | ||
130 | Object 0xc90f6d20: 31 30 31 39 2e 30 30 35 1019.005 | ||
131 | Redzone 0xc90f6d28: 00 cc cc cc . | ||
132 | FreePointer 0xc90f6d2c -> 0xc90f6d58 | ||
133 | Last alloc: get_modalias+0x61/0xf5 jiffies_ago=53 cpu=1 pid=554 | ||
134 | Filler 0xc90f6d50: 5a 5a 5a 5a 5a 5a 5a 5a ZZZZZZZZ | ||
135 | [<c010523d>] dump_trace+0x63/0x1eb | ||
136 | [<c01053df>] show_trace_log_lvl+0x1a/0x2f | ||
137 | [<c010601d>] show_trace+0x12/0x14 | ||
138 | [<c0106035>] dump_stack+0x16/0x18 | ||
139 | [<c017e0fa>] object_err+0x143/0x14b | ||
140 | [<c017e2cc>] check_object+0x66/0x234 | ||
141 | [<c017eb43>] __slab_free+0x239/0x384 | ||
142 | [<c017f446>] kfree+0xa6/0xc6 | ||
143 | [<c02e2335>] get_modalias+0xb9/0xf5 | ||
144 | [<c02e23b7>] dmi_dev_uevent+0x27/0x3c | ||
145 | [<c027866a>] dev_uevent+0x1ad/0x1da | ||
146 | [<c0205024>] kobject_uevent_env+0x20a/0x45b | ||
147 | [<c020527f>] kobject_uevent+0xa/0xf | ||
148 | [<c02779f1>] store_uevent+0x4f/0x58 | ||
149 | [<c027758e>] dev_attr_store+0x29/0x2f | ||
150 | [<c01bec4f>] sysfs_write_file+0x16e/0x19c | ||
151 | [<c0183ba7>] vfs_write+0xd1/0x15a | ||
152 | [<c01841d7>] sys_write+0x3d/0x72 | ||
153 | [<c0104112>] sysenter_past_esp+0x5f/0x99 | ||
154 | [<b7f7b410>] 0xb7f7b410 | ||
155 | ======================= | ||
156 | @@@ SLUB kmalloc-8: Restoring redzone (0xcc) from 0xc90f6d28-0xc90f6d2b | ||
157 | |||
158 | |||
159 | |||
160 | If SLUB encounters a corrupted object then it will perform the following | ||
161 | actions: | ||
162 | |||
163 | 1. Isolation and report of the issue | ||
164 | |||
165 | This will be a message in the system log starting with | ||
166 | |||
167 | *** SLUB <slab cache affected>: <What went wrong>@<object address> | ||
168 | offset=<offset of object into slab> flags=<slabflags> | ||
169 | inuse=<objects in use in this slab> freelist=<first free object in slab> | ||
170 | |||
171 | 2. Report on how the problem was dealt with in order to ensure the continued | ||
172 | operation of the system. | ||
173 | |||
174 | These are messages in the system log beginning with | ||
175 | |||
176 | @@@ SLUB <slab cache affected>: <corrective action taken> | ||
177 | |||
178 | |||
179 | In the above sample SLUB found that the Redzone of an active object has | ||
180 | been overwritten. Here a string of 8 characters was written into a slab that | ||
181 | has the length of 8 characters. However, an 8 character string needs a | ||
182 | terminating 0. That zero has overwritten the first byte of the Redzone field. | ||
183 | After reporting the details of the issue encountered, the @@@ SLUB message | ||
184 | tells us that SLUB has restored the redzone to its proper value and then | ||
185 | system operations continue. | ||
186 | |||
187 | Various types of lines can follow the @@@ SLUB line: | ||
188 | |||
189 | Bytes b4 <address> : <bytes> | ||
190 | Show a few bytes before the object where the problem was detected. | ||
191 | Can be useful if the corruption does not stop with the start of the | ||
192 | object. | ||
193 | |||
194 | Object <address> : <bytes> | ||
195 | The bytes of the object. If the object is inactive then the bytes | ||
196 | typically contain poisoning values. Any non-poison value shows a | ||
197 | corruption by a write after free. | ||
198 | |||
199 | Redzone <address> : <bytes> | ||
200 | The redzone following the object. The redzone is used to detect | ||
201 | writes after the object. All bytes should always have the same | ||
202 | value. If there is any deviation then it is due to a write after | ||
203 | the object boundary. | ||
204 | |||
205 | Freepointer | ||
206 | The pointer to the next free object in the slab. May become | ||
207 | corrupted if overwriting continues after the red zone. | ||
208 | |||
209 | Last alloc: | ||
210 | Last free: | ||
211 | Shows the address from which the object was allocated/freed last. | ||
212 | We note the pid, the time and the CPU that did so. This is usually | ||
213 | the most useful information to figure out where things went wrong. | ||
214 | Here get_modalias() did a kmalloc(8) instead of a kmalloc(9). | ||
215 | |||
216 | Filler <address> : <bytes> | ||
217 | Unused data to fill up the space in order to get the next object | ||
218 | properly aligned. In the debug case we make sure that there are | ||
219 | at least 4 bytes of filler. This allows for the detection of writes | ||
220 | before the object. | ||
221 | |||
222 | Following the filler will be a stackdump. That stackdump describes the | ||
223 | location where the error was detected. The cause of the corruption is more | ||
224 | likely to be found by looking at the information about the last alloc / free. | ||
225 | |||
226 | Christoph Lameter, <clameter@sgi.com>, May 23, 2007 | ||
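
The scenario the report above describes is an off-by-one kmalloc(). A minimal sketch of that class of bug (this is not the actual get_modalias() code, just an illustration grounded in the sample output):

    #include <linux/kernel.h>
    #include <linux/slab.h>

    static char *format_version(void)
    {
            /* "1019.005" is 8 characters, but with the terminating '\0'
             * it needs 9 bytes; kmalloc(8) makes the NUL land in SLUB's
             * redzone, producing the "Redzone Active" report above.
             */
            char *buf = kmalloc(8, GFP_KERNEL);     /* should be kmalloc(9) */

            if (!buf)
                    return NULL;
            sprintf(buf, "%d.%03d", 1019, 5);       /* writes 9 bytes */
            return buf;
    }

With kmalloc(9) the terminating zero stays inside the object and the redzone check passes.
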
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 08f07a74a9d3..88baed1e7e83 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -943,10 +943,9 @@ exit: | |||
943 | 943 | ||
944 | static void smp_tune_scheduling(void) | 944 | static void smp_tune_scheduling(void) |
945 | { | 945 | { |
946 | unsigned long cachesize; /* kB */ | ||
947 | |||
948 | if (cpu_khz) { | 946 | if (cpu_khz) { |
949 | cachesize = boot_cpu_data.x86_cache_size; | 947 | /* cache size in kB */ |
948 | long cachesize = boot_cpu_data.x86_cache_size; | ||
950 | 949 | ||
951 | if (cachesize > 0) | 950 | if (cachesize > 0) |
952 | max_cache_size = cachesize * 1024; | 951 | max_cache_size = cachesize * 1024; |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index b8536c7c0877..85cdd23b0447 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -355,8 +355,9 @@ config RMW_INSNS | |||
355 | adventurous. | 355 | adventurous. |
356 | 356 | ||
357 | config SINGLE_MEMORY_CHUNK | 357 | config SINGLE_MEMORY_CHUNK |
358 | bool "Use one physical chunk of memory only" | 358 | bool "Use one physical chunk of memory only" if ADVANCED && !SUN3 |
359 | depends on ADVANCED && !SUN3 | 359 | default y if SUN3 |
360 | select NEED_MULTIPLE_NODES | ||
360 | help | 361 | help |
361 | Ignore all but the first contiguous chunk of physical memory for VM | 362 | Ignore all but the first contiguous chunk of physical memory for VM |
362 | purposes. This will save a few bytes kernel size and may speed up | 363 | purposes. This will save a few bytes kernel size and may speed up |
@@ -377,6 +378,14 @@ config 060_WRITETHROUGH | |||
377 | is hardwired on. The 53c710 SCSI driver is known to suffer from | 378 | is hardwired on. The 53c710 SCSI driver is known to suffer from |
378 | this problem. | 379 | this problem. |
379 | 380 | ||
381 | config ARCH_DISCONTIGMEM_ENABLE | ||
382 | def_bool !SINGLE_MEMORY_CHUNK | ||
383 | |||
384 | config NODES_SHIFT | ||
385 | int | ||
386 | default "3" | ||
387 | depends on !SINGLE_MEMORY_CHUNK | ||
388 | |||
380 | source "mm/Kconfig" | 389 | source "mm/Kconfig" |
381 | 390 | ||
382 | endmenu | 391 | endmenu |
diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
index c20831a7e1a9..aa383a5ea7ac 100644
--- a/arch/m68k/Makefile
+++ b/arch/m68k/Makefile
@@ -19,6 +19,7 @@ COMPILE_ARCH = $(shell uname -m) | |||
19 | # override top level makefile | 19 | # override top level makefile |
20 | AS += -m68020 | 20 | AS += -m68020 |
21 | LDFLAGS := -m m68kelf | 21 | LDFLAGS := -m m68kelf |
22 | LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds | ||
22 | ifneq ($(COMPILE_ARCH),$(ARCH)) | 23 | ifneq ($(COMPILE_ARCH),$(ARCH)) |
23 | # prefix for cross-compiling binaries | 24 | # prefix for cross-compiling binaries |
24 | CROSS_COMPILE = m68k-linux-gnu- | 25 | CROSS_COMPILE = m68k-linux-gnu- |
diff --git a/arch/m68k/kernel/Makefile b/arch/m68k/kernel/Makefile
index 0b68ab8d63d1..a806208c7fb5 100644
--- a/arch/m68k/kernel/Makefile
+++ b/arch/m68k/kernel/Makefile
@@ -9,13 +9,12 @@ else | |||
9 | endif | 9 | endif |
10 | extra-y += vmlinux.lds | 10 | extra-y += vmlinux.lds |
11 | 11 | ||
12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o \ | 12 | obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \ |
13 | sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o | 13 | sys_m68k.o time.o semaphore.o setup.o m68k_ksyms.o devres.o |
14 | 14 | ||
15 | devres-y = ../../../kernel/irq/devres.o | 15 | devres-y = ../../../kernel/irq/devres.o |
16 | 16 | ||
17 | obj-$(CONFIG_PCI) += bios32.o | 17 | obj-$(CONFIG_PCI) += bios32.o |
18 | obj-$(CONFIG_MODULES) += module.o | ||
19 | obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo | 18 | obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo |
20 | 19 | ||
21 | EXTRA_AFLAGS := -traditional | 20 | EXTRA_AFLAGS := -traditional |
diff --git a/arch/m68k/kernel/module.c b/arch/m68k/kernel/module.c
index 3b1a2ff61ddc..774862bc6977 100644
--- a/arch/m68k/kernel/module.c
+++ b/arch/m68k/kernel/module.c
@@ -1,3 +1,9 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file COPYING in the main directory of this archive | ||
4 | * for more details. | ||
5 | */ | ||
6 | |||
1 | #include <linux/moduleloader.h> | 7 | #include <linux/moduleloader.h> |
2 | #include <linux/elf.h> | 8 | #include <linux/elf.h> |
3 | #include <linux/vmalloc.h> | 9 | #include <linux/vmalloc.h> |
@@ -11,6 +17,8 @@ | |||
11 | #define DEBUGP(fmt...) | 17 | #define DEBUGP(fmt...) |
12 | #endif | 18 | #endif |
13 | 19 | ||
20 | #ifdef CONFIG_MODULES | ||
21 | |||
14 | void *module_alloc(unsigned long size) | 22 | void *module_alloc(unsigned long size) |
15 | { | 23 | { |
16 | if (size == 0) | 24 | if (size == 0) |
@@ -118,11 +126,32 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, | |||
118 | 126 | ||
119 | int module_finalize(const Elf_Ehdr *hdr, | 127 | int module_finalize(const Elf_Ehdr *hdr, |
120 | const Elf_Shdr *sechdrs, | 128 | const Elf_Shdr *sechdrs, |
121 | struct module *me) | 129 | struct module *mod) |
122 | { | 130 | { |
131 | module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end); | ||
132 | |||
123 | return 0; | 133 | return 0; |
124 | } | 134 | } |
125 | 135 | ||
126 | void module_arch_cleanup(struct module *mod) | 136 | void module_arch_cleanup(struct module *mod) |
127 | { | 137 | { |
128 | } | 138 | } |
139 | |||
140 | #endif /* CONFIG_MODULES */ | ||
141 | |||
142 | void module_fixup(struct module *mod, struct m68k_fixup_info *start, | ||
143 | struct m68k_fixup_info *end) | ||
144 | { | ||
145 | struct m68k_fixup_info *fixup; | ||
146 | |||
147 | for (fixup = start; fixup < end; fixup++) { | ||
148 | switch (fixup->type) { | ||
149 | case m68k_fixup_memoffset: | ||
150 | *(u32 *)fixup->addr = m68k_memoffset; | ||
151 | break; | ||
152 | case m68k_fixup_vnode_shift: | ||
153 | *(u16 *)fixup->addr += m68k_virt_to_node_shift; | ||
154 | break; | ||
155 | } | ||
156 | } | ||
157 | } | ||
diff --git a/arch/m68k/kernel/module.lds b/arch/m68k/kernel/module.lds
new file mode 100644
index 000000000000..fda94fa38243
--- /dev/null
+++ b/arch/m68k/kernel/module.lds
@@ -0,0 +1,7 @@ | |||
1 | SECTIONS { | ||
2 | .m68k_fixup : { | ||
3 | __start_fixup = .; | ||
4 | *(.m68k_fixup) | ||
5 | __stop_fixup = .; | ||
6 | } | ||
7 | } | ||
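
The new module.lds (and the matching .m68k_fixup sections added to the vmlinux linker scripts below) only collects fixup records between __start_fixup and __stop_fixup; module_fixup() in module.c above walks such a range and patches each recorded location. A rough sketch of the data involved, with the field types inferred from the casts in module_fixup() rather than copied from the real m68k headers:

    /* Sketch only: layout inferred from module_fixup(), not taken from
     * <asm/module.h>.  Each record names what kind of constant has to be
     * patched into the kernel or module text once the memory layout is known.
     */
    enum m68k_fixup_type {
            m68k_fixup_memoffset,           /* 32-bit operand <- m68k_memoffset */
            m68k_fixup_vnode_shift,         /* 16-bit operand += virt-to-node shift */
    };

    struct m68k_fixup_info {
            enum m68k_fixup_type type;
            void *addr;                     /* location in the text to patch */
    };

    /* Bounds provided by the .m68k_fixup output section: */
    extern struct m68k_fixup_info __start_fixup[], __stop_fixup[];

The kernel image patches itself early via module_fixup(NULL, __start_fixup, __stop_fixup) in paging_init(), and each loaded module gets the same treatment from module_finalize().
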
diff --git a/arch/m68k/kernel/setup.c b/arch/m68k/kernel/setup.c
index 610319356691..215c7bd43924 100644
--- a/arch/m68k/kernel/setup.c
+++ b/arch/m68k/kernel/setup.c
@@ -60,14 +60,12 @@ extern unsigned long availmem; | |||
60 | int m68k_num_memory; | 60 | int m68k_num_memory; |
61 | int m68k_realnum_memory; | 61 | int m68k_realnum_memory; |
62 | EXPORT_SYMBOL(m68k_realnum_memory); | 62 | EXPORT_SYMBOL(m68k_realnum_memory); |
63 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | ||
64 | unsigned long m68k_memoffset; | 63 | unsigned long m68k_memoffset; |
65 | EXPORT_SYMBOL(m68k_memoffset); | 64 | EXPORT_SYMBOL(m68k_memoffset); |
66 | #endif | ||
67 | struct mem_info m68k_memory[NUM_MEMINFO]; | 65 | struct mem_info m68k_memory[NUM_MEMINFO]; |
68 | EXPORT_SYMBOL(m68k_memory); | 66 | EXPORT_SYMBOL(m68k_memory); |
69 | 67 | ||
70 | static struct mem_info m68k_ramdisk; | 68 | struct mem_info m68k_ramdisk; |
71 | 69 | ||
72 | static char m68k_command_line[CL_SIZE]; | 70 | static char m68k_command_line[CL_SIZE]; |
73 | 71 | ||
@@ -208,9 +206,6 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record) | |||
208 | void __init setup_arch(char **cmdline_p) | 206 | void __init setup_arch(char **cmdline_p) |
209 | { | 207 | { |
210 | extern int _etext, _edata, _end; | 208 | extern int _etext, _edata, _end; |
211 | #ifndef CONFIG_SUN3 | ||
212 | unsigned long endmem, startmem; | ||
213 | #endif | ||
214 | int i; | 209 | int i; |
215 | 210 | ||
216 | /* The bootinfo is located right after the kernel bss */ | 211 | /* The bootinfo is located right after the kernel bss */ |
@@ -320,30 +315,16 @@ void __init setup_arch(char **cmdline_p) | |||
320 | panic("No configuration setup"); | 315 | panic("No configuration setup"); |
321 | } | 316 | } |
322 | 317 | ||
323 | #ifndef CONFIG_SUN3 | 318 | paging_init(); |
324 | startmem= m68k_memory[0].addr; | ||
325 | endmem = startmem + m68k_memory[0].size; | ||
326 | high_memory = (void *)PAGE_OFFSET; | ||
327 | for (i = 0; i < m68k_num_memory; i++) { | ||
328 | m68k_memory[i].size &= MASK_256K; | ||
329 | if (m68k_memory[i].addr < startmem) | ||
330 | startmem = m68k_memory[i].addr; | ||
331 | if (m68k_memory[i].addr+m68k_memory[i].size > endmem) | ||
332 | endmem = m68k_memory[i].addr+m68k_memory[i].size; | ||
333 | high_memory += m68k_memory[i].size; | ||
334 | } | ||
335 | |||
336 | availmem += init_bootmem_node(NODE_DATA(0), availmem >> PAGE_SHIFT, | ||
337 | startmem >> PAGE_SHIFT, endmem >> PAGE_SHIFT); | ||
338 | |||
339 | for (i = 0; i < m68k_num_memory; i++) | ||
340 | free_bootmem(m68k_memory[i].addr, m68k_memory[i].size); | ||
341 | |||
342 | reserve_bootmem(m68k_memory[0].addr, availmem - m68k_memory[0].addr); | ||
343 | 319 | ||
320 | #ifndef CONFIG_SUN3 | ||
321 | for (i = 1; i < m68k_num_memory; i++) | ||
322 | free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr, | ||
323 | m68k_memory[i].size); | ||
344 | #ifdef CONFIG_BLK_DEV_INITRD | 324 | #ifdef CONFIG_BLK_DEV_INITRD |
345 | if (m68k_ramdisk.size) { | 325 | if (m68k_ramdisk.size) { |
346 | reserve_bootmem(m68k_ramdisk.addr, m68k_ramdisk.size); | 326 | reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)), |
327 | m68k_ramdisk.addr, m68k_ramdisk.size); | ||
347 | initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); | 328 | initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr); |
348 | initrd_end = initrd_start + m68k_ramdisk.size; | 329 | initrd_end = initrd_start + m68k_ramdisk.size; |
349 | printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); | 330 | printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end); |
@@ -362,8 +343,6 @@ void __init setup_arch(char **cmdline_p) | |||
362 | 343 | ||
363 | #endif /* !CONFIG_SUN3 */ | 344 | #endif /* !CONFIG_SUN3 */ |
364 | 345 | ||
365 | paging_init(); | ||
366 | |||
367 | /* set ISA defs early as possible */ | 346 | /* set ISA defs early as possible */ |
368 | #if defined(CONFIG_ISA) && defined(MULTI_ISA) | 347 | #if defined(CONFIG_ISA) && defined(MULTI_ISA) |
369 | #if defined(CONFIG_Q40) | 348 | #if defined(CONFIG_Q40) |
diff --git a/arch/m68k/kernel/vmlinux-std.lds b/arch/m68k/kernel/vmlinux-std.lds
index 78f139226a1b..40f02b128f22 100644
--- a/arch/m68k/kernel/vmlinux-std.lds
+++ b/arch/m68k/kernel/vmlinux-std.lds
@@ -60,6 +60,11 @@ SECTIONS | |||
60 | __con_initcall_start = .; | 60 | __con_initcall_start = .; |
61 | .con_initcall.init : { *(.con_initcall.init) } | 61 | .con_initcall.init : { *(.con_initcall.init) } |
62 | __con_initcall_end = .; | 62 | __con_initcall_end = .; |
63 | .m68k_fixup : { | ||
64 | __start_fixup = .; | ||
65 | *(.m68k_fixup) | ||
66 | __stop_fixup = .; | ||
67 | } | ||
63 | SECURITY_INIT | 68 | SECURITY_INIT |
64 | #ifdef CONFIG_BLK_DEV_INITRD | 69 | #ifdef CONFIG_BLK_DEV_INITRD |
65 | . = ALIGN(8192); | 70 | . = ALIGN(8192); |
diff --git a/arch/m68k/kernel/vmlinux-sun3.lds b/arch/m68k/kernel/vmlinux-sun3.lds
index c8999b2db23b..f06425b6d206 100644
--- a/arch/m68k/kernel/vmlinux-sun3.lds
+++ b/arch/m68k/kernel/vmlinux-sun3.lds
@@ -54,6 +54,11 @@ __init_begin = .; | |||
54 | __con_initcall_start = .; | 54 | __con_initcall_start = .; |
55 | .con_initcall.init : { *(.con_initcall.init) } | 55 | .con_initcall.init : { *(.con_initcall.init) } |
56 | __con_initcall_end = .; | 56 | __con_initcall_end = .; |
57 | .m68k_fixup : { | ||
58 | __start_fixup = .; | ||
59 | *(.m68k_fixup) | ||
60 | __stop_fixup = .; | ||
61 | } | ||
57 | SECURITY_INIT | 62 | SECURITY_INIT |
58 | #ifdef CONFIG_BLK_DEV_INITRD | 63 | #ifdef CONFIG_BLK_DEV_INITRD |
59 | . = ALIGN(8192); | 64 | . = ALIGN(8192); |
diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c
index ab90213e5c54..f1de19e1dde6 100644
--- a/arch/m68k/mm/init.c
+++ b/arch/m68k/mm/init.c
@@ -7,6 +7,7 @@ | |||
7 | * to motorola.c and sun3mmu.c | 7 | * to motorola.c and sun3mmu.c |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | ||
10 | #include <linux/signal.h> | 11 | #include <linux/signal.h> |
11 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
@@ -31,6 +32,37 @@ | |||
31 | 32 | ||
32 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | 33 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); |
33 | 34 | ||
35 | static bootmem_data_t __initdata bootmem_data[MAX_NUMNODES]; | ||
36 | |||
37 | pg_data_t pg_data_map[MAX_NUMNODES]; | ||
38 | EXPORT_SYMBOL(pg_data_map); | ||
39 | |||
40 | int m68k_virt_to_node_shift; | ||
41 | |||
42 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | ||
43 | pg_data_t *pg_data_table[65]; | ||
44 | EXPORT_SYMBOL(pg_data_table); | ||
45 | #endif | ||
46 | |||
47 | void m68k_setup_node(int node) | ||
48 | { | ||
49 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | ||
50 | struct mem_info *info = m68k_memory + node; | ||
51 | int i, end; | ||
52 | |||
53 | i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift(); | ||
54 | end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift(); | ||
55 | for (; i <= end; i++) { | ||
56 | if (pg_data_table[i]) | ||
57 | printk("overlap at %u for chunk %u\n", i, node); | ||
58 | pg_data_table[i] = pg_data_map + node; | ||
59 | } | ||
60 | #endif | ||
61 | pg_data_map[node].bdata = bootmem_data + node; | ||
62 | node_set_online(node); | ||
63 | } | ||
64 | |||
65 | |||
34 | /* | 66 | /* |
35 | * ZERO_PAGE is a special page that is used for zero-initialized | 67 | * ZERO_PAGE is a special page that is used for zero-initialized |
36 | * data and COW. | 68 | * data and COW. |
@@ -40,52 +72,51 @@ void *empty_zero_page; | |||
40 | 72 | ||
41 | void show_mem(void) | 73 | void show_mem(void) |
42 | { | 74 | { |
43 | unsigned long i; | 75 | pg_data_t *pgdat; |
44 | int free = 0, total = 0, reserved = 0, shared = 0; | 76 | int free = 0, total = 0, reserved = 0, shared = 0; |
45 | int cached = 0; | 77 | int cached = 0; |
46 | 78 | int i; | |
47 | printk("\nMem-info:\n"); | 79 | |
48 | show_free_areas(); | 80 | printk("\nMem-info:\n"); |
49 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 81 | show_free_areas(); |
50 | i = max_mapnr; | 82 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
51 | while (i-- > 0) { | 83 | for_each_online_pgdat(pgdat) { |
52 | total++; | 84 | for (i = 0; i < pgdat->node_spanned_pages; i++) { |
53 | if (PageReserved(mem_map+i)) | 85 | struct page *page = pgdat->node_mem_map + i; |
54 | reserved++; | 86 | total++; |
55 | else if (PageSwapCache(mem_map+i)) | 87 | if (PageReserved(page)) |
56 | cached++; | 88 | reserved++; |
57 | else if (!page_count(mem_map+i)) | 89 | else if (PageSwapCache(page)) |
58 | free++; | 90 | cached++; |
59 | else | 91 | else if (!page_count(page)) |
60 | shared += page_count(mem_map+i) - 1; | 92 | free++; |
61 | } | 93 | else |
62 | printk("%d pages of RAM\n",total); | 94 | shared += page_count(page) - 1; |
63 | printk("%d free pages\n",free); | 95 | } |
64 | printk("%d reserved pages\n",reserved); | 96 | } |
65 | printk("%d pages shared\n",shared); | 97 | printk("%d pages of RAM\n",total); |
66 | printk("%d pages swap cached\n",cached); | 98 | printk("%d free pages\n",free); |
99 | printk("%d reserved pages\n",reserved); | ||
100 | printk("%d pages shared\n",shared); | ||
101 | printk("%d pages swap cached\n",cached); | ||
67 | } | 102 | } |
68 | 103 | ||
69 | extern void init_pointer_table(unsigned long ptable); | 104 | extern void init_pointer_table(unsigned long ptable); |
70 | 105 | ||
71 | /* References to section boundaries */ | 106 | /* References to section boundaries */ |
72 | 107 | ||
73 | extern char _text, _etext, _edata, __bss_start, _end; | 108 | extern char _text[], _etext[]; |
74 | extern char __init_begin, __init_end; | 109 | extern char __init_begin[], __init_end[]; |
75 | 110 | ||
76 | extern pmd_t *zero_pgtable; | 111 | extern pmd_t *zero_pgtable; |
77 | 112 | ||
78 | void __init mem_init(void) | 113 | void __init mem_init(void) |
79 | { | 114 | { |
115 | pg_data_t *pgdat; | ||
80 | int codepages = 0; | 116 | int codepages = 0; |
81 | int datapages = 0; | 117 | int datapages = 0; |
82 | int initpages = 0; | 118 | int initpages = 0; |
83 | unsigned long tmp; | ||
84 | #ifndef CONFIG_SUN3 | ||
85 | int i; | 119 | int i; |
86 | #endif | ||
87 | |||
88 | max_mapnr = num_physpages = (((unsigned long)high_memory - PAGE_OFFSET) >> PAGE_SHIFT); | ||
89 | 120 | ||
90 | #ifdef CONFIG_ATARI | 121 | #ifdef CONFIG_ATARI |
91 | if (MACH_IS_ATARI) | 122 | if (MACH_IS_ATARI) |
@@ -93,19 +124,25 @@ void __init mem_init(void) | |||
93 | #endif | 124 | #endif |
94 | 125 | ||
95 | /* this will put all memory onto the freelists */ | 126 | /* this will put all memory onto the freelists */ |
96 | totalram_pages = free_all_bootmem(); | 127 | totalram_pages = num_physpages = 0; |
97 | 128 | for_each_online_pgdat(pgdat) { | |
98 | for (tmp = PAGE_OFFSET ; tmp < (unsigned long)high_memory; tmp += PAGE_SIZE) { | 129 | num_physpages += pgdat->node_present_pages; |
99 | if (PageReserved(virt_to_page(tmp))) { | 130 | |
100 | if (tmp >= (unsigned long)&_text | 131 | totalram_pages += free_all_bootmem_node(pgdat); |
101 | && tmp < (unsigned long)&_etext) | 132 | for (i = 0; i < pgdat->node_spanned_pages; i++) { |
133 | struct page *page = pgdat->node_mem_map + i; | ||
134 | char *addr = page_to_virt(page); | ||
135 | |||
136 | if (!PageReserved(page)) | ||
137 | continue; | ||
138 | if (addr >= _text && | ||
139 | addr < _etext) | ||
102 | codepages++; | 140 | codepages++; |
103 | else if (tmp >= (unsigned long) &__init_begin | 141 | else if (addr >= __init_begin && |
104 | && tmp < (unsigned long) &__init_end) | 142 | addr < __init_end) |
105 | initpages++; | 143 | initpages++; |
106 | else | 144 | else |
107 | datapages++; | 145 | datapages++; |
108 | continue; | ||
109 | } | 146 | } |
110 | } | 147 | } |
111 | 148 | ||
@@ -124,7 +161,7 @@ void __init mem_init(void) | |||
124 | 161 | ||
125 | printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", | 162 | printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n", |
126 | (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), | 163 | (unsigned long)nr_free_pages() << (PAGE_SHIFT-10), |
127 | max_mapnr << (PAGE_SHIFT-10), | 164 | totalram_pages << (PAGE_SHIFT-10), |
128 | codepages << (PAGE_SHIFT-10), | 165 | codepages << (PAGE_SHIFT-10), |
129 | datapages << (PAGE_SHIFT-10), | 166 | datapages << (PAGE_SHIFT-10), |
130 | initpages << (PAGE_SHIFT-10)); | 167 | initpages << (PAGE_SHIFT-10)); |
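
m68k_setup_node() above fills pg_data_table[] so that a kernel virtual address can be turned into its pg_data_t with one shift and one table lookup. The real __virt_to_node()/__virt_to_node_shift() helpers live in the m68k headers (and the shift may be patched in through the m68k_fixup mechanism rather than loaded from the variable); a hedged approximation built only from what is visible in this patch:

    /* Approximation of the node lookup done by __virt_to_node(); not the
     * real header implementation.
     */
    static inline pg_data_t *virt_to_node_sketch(const void *addr)
    {
            return pg_data_table[(unsigned long)addr >> m68k_virt_to_node_shift];
    }

With CONFIG_SINGLE_MEMORY_CHUNK only node 0 exists, which is why pg_data_table[] is compiled out in the #ifndef above.
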
diff --git a/arch/m68k/mm/memory.c b/arch/m68k/mm/memory.c
index 13c0b4ad01eb..b7473525b431 100644
--- a/arch/m68k/mm/memory.c
+++ b/arch/m68k/mm/memory.c
@@ -127,67 +127,6 @@ int free_pointer_table (pmd_t *ptable) | |||
127 | return 0; | 127 | return 0; |
128 | } | 128 | } |
129 | 129 | ||
130 | #ifdef DEBUG_INVALID_PTOV | ||
131 | int mm_inv_cnt = 5; | ||
132 | #endif | ||
133 | |||
134 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | ||
135 | /* | ||
136 | * The following two routines map from a physical address to a kernel | ||
137 | * virtual address and vice versa. | ||
138 | */ | ||
139 | unsigned long mm_vtop(unsigned long vaddr) | ||
140 | { | ||
141 | int i=0; | ||
142 | unsigned long voff = (unsigned long)vaddr - PAGE_OFFSET; | ||
143 | |||
144 | do { | ||
145 | if (voff < m68k_memory[i].size) { | ||
146 | #ifdef DEBUGPV | ||
147 | printk ("VTOP(%p)=%lx\n", vaddr, | ||
148 | m68k_memory[i].addr + voff); | ||
149 | #endif | ||
150 | return m68k_memory[i].addr + voff; | ||
151 | } | ||
152 | voff -= m68k_memory[i].size; | ||
153 | } while (++i < m68k_num_memory); | ||
154 | |||
155 | /* As a special case allow `__pa(high_memory)'. */ | ||
156 | if (voff == 0) | ||
157 | return m68k_memory[i-1].addr + m68k_memory[i-1].size; | ||
158 | |||
159 | return -1; | ||
160 | } | ||
161 | EXPORT_SYMBOL(mm_vtop); | ||
162 | |||
163 | unsigned long mm_ptov (unsigned long paddr) | ||
164 | { | ||
165 | int i = 0; | ||
166 | unsigned long poff, voff = PAGE_OFFSET; | ||
167 | |||
168 | do { | ||
169 | poff = paddr - m68k_memory[i].addr; | ||
170 | if (poff < m68k_memory[i].size) { | ||
171 | #ifdef DEBUGPV | ||
172 | printk ("PTOV(%lx)=%lx\n", paddr, poff + voff); | ||
173 | #endif | ||
174 | return poff + voff; | ||
175 | } | ||
176 | voff += m68k_memory[i].size; | ||
177 | } while (++i < m68k_num_memory); | ||
178 | |||
179 | #ifdef DEBUG_INVALID_PTOV | ||
180 | if (mm_inv_cnt > 0) { | ||
181 | mm_inv_cnt--; | ||
182 | printk("Invalid use of phys_to_virt(0x%lx) at 0x%p!\n", | ||
183 | paddr, __builtin_return_address(0)); | ||
184 | } | ||
185 | #endif | ||
186 | return -1; | ||
187 | } | ||
188 | EXPORT_SYMBOL(mm_ptov); | ||
189 | #endif | ||
190 | |||
191 | /* invalidate page in both caches */ | 130 | /* invalidate page in both caches */ |
192 | static inline void clear040(unsigned long paddr) | 131 | static inline void clear040(unsigned long paddr) |
193 | { | 132 | { |
@@ -354,15 +293,3 @@ void cache_push (unsigned long paddr, int len) | |||
354 | } | 293 | } |
355 | EXPORT_SYMBOL(cache_push); | 294 | EXPORT_SYMBOL(cache_push); |
356 | 295 | ||
357 | #ifndef CONFIG_SINGLE_MEMORY_CHUNK | ||
358 | int mm_end_of_chunk (unsigned long addr, int len) | ||
359 | { | ||
360 | int i; | ||
361 | |||
362 | for (i = 0; i < m68k_num_memory; i++) | ||
363 | if (m68k_memory[i].addr + m68k_memory[i].size == addr + len) | ||
364 | return 1; | ||
365 | return 0; | ||
366 | } | ||
367 | EXPORT_SYMBOL(mm_end_of_chunk); | ||
368 | #endif | ||
diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c
index afcccdc6ad45..7d571a2b44dd 100644
--- a/arch/m68k/mm/motorola.c
+++ b/arch/m68k/mm/motorola.c
@@ -43,6 +43,11 @@ unsigned long mm_cachebits; | |||
43 | EXPORT_SYMBOL(mm_cachebits); | 43 | EXPORT_SYMBOL(mm_cachebits); |
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | /* size of memory already mapped in head.S */ | ||
47 | #define INIT_MAPPED_SIZE (4UL<<20) | ||
48 | |||
49 | extern unsigned long availmem; | ||
50 | |||
46 | static pte_t * __init kernel_page_table(void) | 51 | static pte_t * __init kernel_page_table(void) |
47 | { | 52 | { |
48 | pte_t *ptablep; | 53 | pte_t *ptablep; |
@@ -98,19 +103,20 @@ static pmd_t * __init kernel_ptr_table(void) | |||
98 | return last_pgtable; | 103 | return last_pgtable; |
99 | } | 104 | } |
100 | 105 | ||
101 | static unsigned long __init | 106 | static void __init map_node(int node) |
102 | map_chunk (unsigned long addr, long size) | ||
103 | { | 107 | { |
104 | #define PTRTREESIZE (256*1024) | 108 | #define PTRTREESIZE (256*1024) |
105 | #define ROOTTREESIZE (32*1024*1024) | 109 | #define ROOTTREESIZE (32*1024*1024) |
106 | static unsigned long virtaddr = PAGE_OFFSET; | 110 | unsigned long physaddr, virtaddr, size; |
107 | unsigned long physaddr; | ||
108 | pgd_t *pgd_dir; | 111 | pgd_t *pgd_dir; |
109 | pmd_t *pmd_dir; | 112 | pmd_t *pmd_dir; |
110 | pte_t *pte_dir; | 113 | pte_t *pte_dir; |
111 | 114 | ||
112 | physaddr = (addr | m68k_supervisor_cachemode | | 115 | size = m68k_memory[node].size; |
113 | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY); | 116 | physaddr = m68k_memory[node].addr; |
117 | virtaddr = (unsigned long)phys_to_virt(physaddr); | ||
118 | physaddr |= m68k_supervisor_cachemode | | ||
119 | _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY; | ||
114 | if (CPU_IS_040_OR_060) | 120 | if (CPU_IS_040_OR_060) |
115 | physaddr |= _PAGE_GLOBAL040; | 121 | physaddr |= _PAGE_GLOBAL040; |
116 | 122 | ||
@@ -190,8 +196,6 @@ map_chunk (unsigned long addr, long size) | |||
190 | #ifdef DEBUG | 196 | #ifdef DEBUG |
191 | printk("\n"); | 197 | printk("\n"); |
192 | #endif | 198 | #endif |
193 | |||
194 | return virtaddr; | ||
195 | } | 199 | } |
196 | 200 | ||
197 | /* | 201 | /* |
@@ -200,15 +204,16 @@ map_chunk (unsigned long addr, long size) | |||
200 | */ | 204 | */ |
201 | void __init paging_init(void) | 205 | void __init paging_init(void) |
202 | { | 206 | { |
203 | int chunk; | ||
204 | unsigned long mem_avail = 0; | ||
205 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; | 207 | unsigned long zones_size[MAX_NR_ZONES] = { 0, }; |
208 | unsigned long min_addr, max_addr; | ||
209 | unsigned long addr, size, end; | ||
210 | int i; | ||
206 | 211 | ||
207 | #ifdef DEBUG | 212 | #ifdef DEBUG |
208 | { | 213 | { |
209 | extern unsigned long availmem; | 214 | extern unsigned long availmem; |
210 | printk ("start of paging_init (%p, %lx, %lx, %lx)\n", | 215 | printk ("start of paging_init (%p, %lx)\n", |
211 | kernel_pg_dir, availmem, start_mem, end_mem); | 216 | kernel_pg_dir, availmem); |
212 | } | 217 | } |
213 | #endif | 218 | #endif |
214 | 219 | ||
@@ -222,24 +227,62 @@ void __init paging_init(void) | |||
222 | pgprot_val(protection_map[i]) |= _PAGE_CACHE040; | 227 | pgprot_val(protection_map[i]) |= _PAGE_CACHE040; |
223 | } | 228 | } |
224 | 229 | ||
230 | min_addr = m68k_memory[0].addr; | ||
231 | max_addr = min_addr + m68k_memory[0].size; | ||
232 | for (i = 1; i < m68k_num_memory;) { | ||
233 | if (m68k_memory[i].addr < min_addr) { | ||
234 | printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n", | ||
235 | m68k_memory[i].addr, m68k_memory[i].size); | ||
236 | printk("Fix your bootloader or use a memfile to make use of this area!\n"); | ||
237 | m68k_num_memory--; | ||
238 | memmove(m68k_memory + i, m68k_memory + i + 1, | ||
239 | (m68k_num_memory - i) * sizeof(struct mem_info)); | ||
240 | continue; | ||
241 | } | ||
242 | addr = m68k_memory[i].addr + m68k_memory[i].size; | ||
243 | if (addr > max_addr) | ||
244 | max_addr = addr; | ||
245 | i++; | ||
246 | } | ||
247 | m68k_memoffset = min_addr - PAGE_OFFSET; | ||
248 | m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6; | ||
249 | |||
250 | module_fixup(NULL, __start_fixup, __stop_fixup); | ||
251 | flush_icache(); | ||
252 | |||
253 | high_memory = phys_to_virt(max_addr); | ||
254 | |||
255 | min_low_pfn = availmem >> PAGE_SHIFT; | ||
256 | max_low_pfn = max_addr >> PAGE_SHIFT; | ||
257 | |||
258 | for (i = 0; i < m68k_num_memory; i++) { | ||
259 | addr = m68k_memory[i].addr; | ||
260 | end = addr + m68k_memory[i].size; | ||
261 | m68k_setup_node(i); | ||
262 | availmem = PAGE_ALIGN(availmem); | ||
263 | availmem += init_bootmem_node(NODE_DATA(i), | ||
264 | availmem >> PAGE_SHIFT, | ||
265 | addr >> PAGE_SHIFT, | ||
266 | end >> PAGE_SHIFT); | ||
267 | } | ||
268 | |||
225 | /* | 269 | /* |
226 | * Map the physical memory available into the kernel virtual | 270 | * Map the physical memory available into the kernel virtual |
227 | * address space. It may allocate some memory for page | 271 | * address space. First initialize the bootmem allocator with |
228 | * tables and thus modify availmem. | 272 | * the memory we already mapped, so map_node() has something |
273 | * to allocate. | ||
229 | */ | 274 | */ |
275 | addr = m68k_memory[0].addr; | ||
276 | size = m68k_memory[0].size; | ||
277 | free_bootmem_node(NODE_DATA(0), availmem, min(INIT_MAPPED_SIZE, size) - (availmem - addr)); | ||
278 | map_node(0); | ||
279 | if (size > INIT_MAPPED_SIZE) | ||
280 | free_bootmem_node(NODE_DATA(0), addr + INIT_MAPPED_SIZE, size - INIT_MAPPED_SIZE); | ||
230 | 281 | ||
231 | for (chunk = 0; chunk < m68k_num_memory; chunk++) { | 282 | for (i = 1; i < m68k_num_memory; i++) |
232 | mem_avail = map_chunk (m68k_memory[chunk].addr, | 283 | map_node(i); |
233 | m68k_memory[chunk].size); | ||
234 | |||
235 | } | ||
236 | 284 | ||
237 | flush_tlb_all(); | 285 | flush_tlb_all(); |
238 | #ifdef DEBUG | ||
239 | printk ("memory available is %ldKB\n", mem_avail >> 10); | ||
240 | printk ("start_mem is %#lx\nvirtual_end is %#lx\n", | ||
241 | start_mem, end_mem); | ||
242 | #endif | ||
243 | 286 | ||
244 | /* | 287 | /* |
245 | * initialize the bad page table and bad page to point | 288 | * initialize the bad page table and bad page to point |
@@ -256,14 +299,11 @@ void __init paging_init(void) | |||
256 | #ifdef DEBUG | 299 | #ifdef DEBUG |
257 | printk ("before free_area_init\n"); | 300 | printk ("before free_area_init\n"); |
258 | #endif | 301 | #endif |
259 | zones_size[ZONE_DMA] = (mach_max_dma_address < (unsigned long)high_memory ? | 302 | for (i = 0; i < m68k_num_memory; i++) { |
260 | (mach_max_dma_address+1) : (unsigned long)high_memory); | 303 | zones_size[ZONE_DMA] = m68k_memory[i].size >> PAGE_SHIFT; |
261 | zones_size[ZONE_NORMAL] = (unsigned long)high_memory - zones_size[0]; | 304 | free_area_init_node(i, pg_data_map + i, zones_size, |
262 | 305 | m68k_memory[i].addr >> PAGE_SHIFT, NULL); | |
263 | zones_size[ZONE_DMA] = (zones_size[ZONE_DMA] - PAGE_OFFSET) >> PAGE_SHIFT; | 306 | } |
264 | zones_size[ZONE_NORMAL] >>= PAGE_SHIFT; | ||
265 | |||
266 | free_area_init(zones_size); | ||
267 | } | 307 | } |
268 | 308 | ||
269 | extern char __init_begin, __init_end; | 309 | extern char __init_begin, __init_end; |
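
To make the shift computation in paging_init() above concrete (numbers are illustrative only): if the memory chunks span 256 MB, then max_addr - min_addr = 2^28, fls(2^28 - 1) = 28, and m68k_virt_to_node_shift = 22, so every 4 MB (2^22 byte) window of kernel virtual address space maps to one pg_data_table[] slot; at most 64 such slots are needed for the whole range, which fits within the 65-entry table set up in mm/init.c.
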
diff --git a/arch/m68k/sun3/config.c b/arch/m68k/sun3/config.c
index 4851b8437a87..c0fbd278fbb1 100644
--- a/arch/m68k/sun3/config.c
+++ b/arch/m68k/sun3/config.c
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/contregs.h> | 21 | #include <asm/contregs.h> |
22 | #include <asm/movs.h> | 22 | #include <asm/movs.h> |
23 | #include <asm/pgtable.h> | 23 | #include <asm/pgtable.h> |
24 | #include <asm/pgalloc.h> | ||
24 | #include <asm/sun3-head.h> | 25 | #include <asm/sun3-head.h> |
25 | #include <asm/sun3mmu.h> | 26 | #include <asm/sun3mmu.h> |
26 | #include <asm/rtc.h> | 27 | #include <asm/rtc.h> |
@@ -127,6 +128,7 @@ void __init sun3_bootmem_alloc(unsigned long memory_start, unsigned long memory_ | |||
127 | high_memory = (void *)memory_end; | 128 | high_memory = (void *)memory_end; |
128 | availmem = memory_start; | 129 | availmem = memory_start; |
129 | 130 | ||
131 | m68k_setup_node(0); | ||
130 | availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); | 132 | availmem += init_bootmem_node(NODE_DATA(0), start_page, 0, num_pages); |
131 | availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; | 133 | availmem = (availmem + (PAGE_SIZE-1)) & PAGE_MASK; |
132 | 134 | ||
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index 617d29832e19..cbddeb38ffda 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -124,10 +124,10 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) | |||
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | u32 prev; | 125 | u32 prev; |
126 | 126 | ||
127 | spin_lock_irqsave(ATOMIC_HASH(addr), flags); | 127 | spin_lock_irqsave(ATOMIC_HASH(ptr), flags); |
128 | if ((prev = *ptr) == old) | 128 | if ((prev = *ptr) == old) |
129 | *ptr = new; | 129 | *ptr = new; |
130 | spin_unlock_irqrestore(ATOMIC_HASH(addr), flags); | 130 | spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags); |
131 | 131 | ||
132 | return (unsigned long)prev; | 132 | return (unsigned long)prev; |
133 | } | 133 | } |
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 8f10dda0f5c0..ed712e0b3372 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -2498,3 +2498,75 @@ sun4v_vintr_set_target: | |||
2498 | retl | 2498 | retl |
2499 | nop | 2499 | nop |
2500 | .size sun4v_vintr_set_target, .-sun4v_vintr_set_target | 2500 | .size sun4v_vintr_set_target, .-sun4v_vintr_set_target |
2501 | |||
2502 | /* %o0: NCS sub-function | ||
2503 | * %o1: sub-function arg real-address | ||
2504 | * %o2: sub-function arg size | ||
2505 | * | ||
2506 | * returns %o0: status | ||
2507 | */ | ||
2508 | .globl sun4v_ncs_request | ||
2509 | .type sun4v_ncs_request,#function | ||
2510 | sun4v_ncs_request: | ||
2511 | mov HV_FAST_NCS_REQUEST, %o5 | ||
2512 | ta HV_FAST_TRAP | ||
2513 | retl | ||
2514 | nop | ||
2515 | .size sun4v_ncs_request, .-sun4v_ncs_request | ||
2516 | |||
2517 | .globl sun4v_scv_send | ||
2518 | .type sun4v_scv_send,#function | ||
2519 | sun4v_scv_send: | ||
2520 | save %sp, -192, %sp | ||
2521 | mov %i0, %o0 | ||
2522 | mov %i1, %o1 | ||
2523 | mov %i2, %o2 | ||
2524 | mov HV_FAST_SVC_SEND, %o5 | ||
2525 | ta HV_FAST_TRAP | ||
2526 | stx %o1, [%i3] | ||
2527 | ret | ||
2528 | restore | ||
2529 | .size sun4v_scv_send, .-sun4v_scv_send | ||
2530 | |||
2531 | .globl sun4v_scv_recv | ||
2532 | .type sun4v_scv_recv,#function | ||
2533 | sun4v_scv_recv: | ||
2534 | save %sp, -192, %sp | ||
2535 | mov %i0, %o0 | ||
2536 | mov %i1, %o1 | ||
2537 | mov %i2, %o2 | ||
2538 | mov HV_FAST_SVC_RECV, %o5 | ||
2539 | ta HV_FAST_TRAP | ||
2540 | stx %o1, [%i3] | ||
2541 | ret | ||
2542 | restore | ||
2543 | .size sun4v_scv_recv, .-sun4v_scv_recv | ||
2544 | |||
2545 | .globl sun4v_scv_getstatus | ||
2546 | .type sun4v_scv_getstatus,#function | ||
2547 | sun4v_scv_getstatus: | ||
2548 | mov HV_FAST_SVC_GETSTATUS, %o5 | ||
2549 | mov %o1, %o4 | ||
2550 | ta HV_FAST_TRAP | ||
2551 | stx %o1, [%o4] | ||
2552 | retl | ||
2553 | nop | ||
2554 | .size sun4v_scv_getstatus, .-sun4v_scv_getstatus | ||
2555 | |||
2556 | .globl sun4v_scv_setstatus | ||
2557 | .type sun4v_scv_setstatus,#function | ||
2558 | sun4v_scv_setstatus: | ||
2559 | mov HV_FAST_SVC_SETSTATUS, %o5 | ||
2560 | ta HV_FAST_TRAP | ||
2561 | retl | ||
2562 | nop | ||
2563 | .size sun4v_scv_setstatus, .-sun4v_scv_setstatus | ||
2564 | |||
2565 | .globl sun4v_scv_clrstatus | ||
2566 | .type sun4v_scv_clrstatus,#function | ||
2567 | sun4v_scv_clrstatus: | ||
2568 | mov HV_FAST_SVC_CLRSTATUS, %o5 | ||
2569 | ta HV_FAST_TRAP | ||
2570 | retl | ||
2571 | nop | ||
2572 | .size sun4v_scv_clrstatus, .-sun4v_scv_clrstatus | ||
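
All of these new entry points follow the sun4v fast-trap convention visible above: the hypervisor function number goes in %o5 before the "ta HV_FAST_TRAP", arguments are passed in %o0-%o2, and the status comes back in %o0. Going only by the register comment on sun4v_ncs_request, its C-side declaration presumably looks something like this (a sketch, not copied from the real sparc64 headers):

    /* Inferred from the register-usage comment above; the authoritative
     * prototype lives in the sparc64 hypervisor headers.
     */
    extern unsigned long sun4v_ncs_request(unsigned long subfunc,
                                           unsigned long arg_ra,
                                           unsigned long arg_size);
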
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index 3ff4e1f0f032..ac6dce2e7596 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -298,7 +298,7 @@ static inline int cryptd_create_thread(struct cryptd_state *state, | |||
298 | mutex_init(&state->mutex); | 298 | mutex_init(&state->mutex); |
299 | crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN); | 299 | crypto_init_queue(&state->queue, CRYPTD_MAX_QLEN); |
300 | 300 | ||
301 | state->task = kthread_create(fn, state, name); | 301 | state->task = kthread_run(fn, state, name); |
302 | if (IS_ERR(state->task)) | 302 | if (IS_ERR(state->task)) |
303 | return PTR_ERR(state->task); | 303 | return PTR_ERR(state->task); |
304 | 304 | ||
@@ -316,6 +316,8 @@ static int cryptd_thread(void *data) | |||
316 | struct cryptd_state *state = data; | 316 | struct cryptd_state *state = data; |
317 | int stop; | 317 | int stop; |
318 | 318 | ||
319 | current->flags |= PF_NOFREEZE; | ||
320 | |||
319 | do { | 321 | do { |
320 | struct crypto_async_request *req, *backlog; | 322 | struct crypto_async_request *req, *backlog; |
321 | 323 | ||
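
Two things change for the cryptd worker here. kthread_create() only sets a thread up and leaves it asleep until someone calls wake_up_process() on it, whereas kthread_run() is the create-and-wake combination, which is what a worker that should start servicing its queue immediately needs. A sketch of the equivalence (mirroring the call in cryptd_create_thread(); this is not the real kthread.h macro expansion):

    struct task_struct *task;

    task = kthread_create(fn, state, name);   /* created, but still asleep */
    if (!IS_ERR(task))
            wake_up_process(task);            /* the extra step kthread_run() performs */

Setting PF_NOFREEZE in cryptd_thread() additionally keeps the worker from being stopped by the freezer during suspend, so queued requests are not stalled.
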
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3ca9c610c110..af625147df62 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -3783,6 +3783,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
3783 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, | 3783 | { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, |
3784 | /* NCQ is broken */ | 3784 | /* NCQ is broken */ |
3785 | { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, | 3785 | { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ }, |
3786 | { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ }, | ||
3786 | /* NCQ hard hangs device under heavier load, needs hard power cycle */ | 3787 | /* NCQ hard hangs device under heavier load, needs hard power cycle */ |
3787 | { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, | 3788 | { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ }, |
3788 | /* Blacklist entries taken from Silicon Image 3124/3132 | 3789 | /* Blacklist entries taken from Silicon Image 3124/3132 |
diff --git a/drivers/char/drm/drm_drawable.c b/drivers/char/drm/drm_drawable.c
index de37d5f74563..b33313be2547 100644
--- a/drivers/char/drm/drm_drawable.c
+++ b/drivers/char/drm/drm_drawable.c
@@ -172,38 +172,49 @@ int drm_rmdraw(DRM_IOCTL_ARGS) | |||
172 | 172 | ||
173 | bitfield_length = idx + 1; | 173 | bitfield_length = idx + 1; |
174 | 174 | ||
175 | if (idx != id / (8 * sizeof(*bitfield))) | 175 | bitfield = NULL; |
176 | bitfield = drm_alloc(bitfield_length * | ||
177 | sizeof(*bitfield), DRM_MEM_BUFS); | ||
178 | 176 | ||
179 | if (!bitfield && bitfield_length) { | 177 | if (bitfield_length) { |
180 | bitfield = dev->drw_bitfield; | 178 | if (bitfield_length != dev->drw_bitfield_length) |
181 | bitfield_length = dev->drw_bitfield_length; | 179 | bitfield = drm_alloc(bitfield_length * |
180 | sizeof(*bitfield), | ||
181 | DRM_MEM_BUFS); | ||
182 | |||
183 | if (!bitfield) { | ||
184 | bitfield = dev->drw_bitfield; | ||
185 | bitfield_length = dev->drw_bitfield_length; | ||
186 | } | ||
182 | } | 187 | } |
183 | } | 188 | } |
184 | 189 | ||
185 | if (bitfield != dev->drw_bitfield) { | 190 | if (bitfield != dev->drw_bitfield) { |
186 | info_length = 8 * sizeof(*bitfield) * bitfield_length; | 191 | info_length = 8 * sizeof(*bitfield) * bitfield_length; |
187 | 192 | ||
188 | info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS); | 193 | if (info_length) { |
194 | info = drm_alloc(info_length * sizeof(*info), | ||
195 | DRM_MEM_BUFS); | ||
189 | 196 | ||
190 | if (!info && info_length) { | 197 | if (!info) { |
191 | info = dev->drw_info; | 198 | info = dev->drw_info; |
192 | info_length = dev->drw_info_length; | 199 | info_length = dev->drw_info_length; |
193 | } | 200 | } |
201 | } else | ||
202 | info = NULL; | ||
194 | 203 | ||
195 | spin_lock_irqsave(&dev->drw_lock, irqflags); | 204 | spin_lock_irqsave(&dev->drw_lock, irqflags); |
196 | 205 | ||
197 | memcpy(bitfield, dev->drw_bitfield, bitfield_length * | 206 | if (bitfield) |
198 | sizeof(*bitfield)); | 207 | memcpy(bitfield, dev->drw_bitfield, bitfield_length * |
208 | sizeof(*bitfield)); | ||
199 | drm_free(dev->drw_bitfield, sizeof(*bitfield) * | 209 | drm_free(dev->drw_bitfield, sizeof(*bitfield) * |
200 | dev->drw_bitfield_length, DRM_MEM_BUFS); | 210 | dev->drw_bitfield_length, DRM_MEM_BUFS); |
201 | dev->drw_bitfield = bitfield; | 211 | dev->drw_bitfield = bitfield; |
202 | dev->drw_bitfield_length = bitfield_length; | 212 | dev->drw_bitfield_length = bitfield_length; |
203 | 213 | ||
204 | if (info != dev->drw_info) { | 214 | if (info != dev->drw_info) { |
205 | memcpy(info, dev->drw_info, info_length * | 215 | if (info) |
206 | sizeof(*info)); | 216 | memcpy(info, dev->drw_info, info_length * |
217 | sizeof(*info)); | ||
207 | drm_free(dev->drw_info, sizeof(*info) * | 218 | drm_free(dev->drw_info, sizeof(*info) * |
208 | dev->drw_info_length, DRM_MEM_BUFS); | 219 | dev->drw_info_length, DRM_MEM_BUFS); |
209 | dev->drw_info = info; | 220 | dev->drw_info = info; |
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 31cdde83713b..177ccc07f968 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -102,13 +102,20 @@ | |||
102 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ | 102 | {0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
103 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ | 103 | {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \ |
104 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ | 104 | {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \ |
105 | {0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
105 | {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | 106 | {0x1002, 0x5955, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ |
107 | {0x1002, 0x5974, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
108 | {0x1002, 0x5975, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
106 | {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 109 | {0x1002, 0x5960, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
107 | {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 110 | {0x1002, 0x5961, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
108 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 111 | {0x1002, 0x5962, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
109 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 112 | {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
110 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ | 113 | {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ |
111 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ | 114 | {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ |
115 | {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
116 | {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
117 | {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
118 | {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ | ||
112 | {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 119 | {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
113 | {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 120 | {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
114 | {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ | 121 | {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ |
diff --git a/drivers/char/drm/i915_irq.c b/drivers/char/drm/i915_irq.c
index 78c1ae28f17c..b92062a239f1 100644
--- a/drivers/char/drm/i915_irq.c
+++ b/drivers/char/drm/i915_irq.c
@@ -582,7 +582,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev) | |||
582 | { | 582 | { |
583 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 583 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
584 | 584 | ||
585 | dev_priv->swaps_lock = SPIN_LOCK_UNLOCKED; | 585 | spin_lock_init(&dev_priv->swaps_lock); |
586 | INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); | 586 | INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); |
587 | dev_priv->swaps_pending = 0; | 587 | dev_priv->swaps_pending = 0; |
588 | 588 | ||
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9fe3a38883ee..59b9943b077d 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4920,7 +4920,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev, | |||
4920 | pci_cmd |= PCI_COMMAND_PARITY; | 4920 | pci_cmd |= PCI_COMMAND_PARITY; |
4921 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); | 4921 | pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); |
4922 | if (pci_set_mwi(pdev)) | 4922 | if (pci_set_mwi(pdev)) |
4923 | printk(KERN_WARNING PFX "Could enable MWI for %s\n", | 4923 | printk(KERN_WARNING PFX "Could not enable MWI for %s\n", |
4924 | pci_name(pdev)); | 4924 | pci_name(pdev)); |
4925 | 4925 | ||
4926 | /* | 4926 | /* |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6ccc2e95930a..1cff65fb9c43 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1625,18 +1625,20 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_CK804_PCIE, | |||
1625 | quirk_nvidia_ck804_pcie_aer_ext_cap); | 1625 | quirk_nvidia_ck804_pcie_aer_ext_cap); |
1626 | 1626 | ||
1627 | #ifdef CONFIG_PCI_MSI | 1627 | #ifdef CONFIG_PCI_MSI |
1628 | /* The Serverworks PCI-X chipset does not support MSI. We cannot easily rely | 1628 | /* Some chipsets do not support MSI. We cannot easily rely on setting |
1629 | * on setting PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually | 1629 | * PCI_BUS_FLAGS_NO_MSI in its bus flags because there are actually |
1630 | * some other busses controlled by the chipset even if Linux is not aware of it. | 1630 | * some other busses controlled by the chipset even if Linux is not |
1631 | * Instead of setting the flag on all busses in the machine, simply disable MSI | 1631 | * aware of it. Instead of setting the flag on all busses in the |
1632 | * globally. | 1632 | * machine, simply disable MSI globally. |
1633 | */ | 1633 | */ |
1634 | static void __init quirk_svw_msi(struct pci_dev *dev) | 1634 | static void __init quirk_disable_all_msi(struct pci_dev *dev) |
1635 | { | 1635 | { |
1636 | pci_no_msi(); | 1636 | pci_no_msi(); |
1637 | printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n"); | 1637 | printk(KERN_WARNING "PCI: MSI quirk detected. MSI deactivated.\n"); |
1638 | } | 1638 | } |
1639 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_svw_msi); | 1639 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_GCNB_LE, quirk_disable_all_msi); |
1640 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_all_msi); | ||
1641 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_all_msi); | ||
1640 | 1642 | ||
1641 | /* Disable MSI on chipsets that are known to not support it */ | 1643 | /* Disable MSI on chipsets that are known to not support it */ |
1642 | static void __devinit quirk_disable_msi(struct pci_dev *dev) | 1644 | static void __devinit quirk_disable_msi(struct pci_dev *dev) |
@@ -1649,8 +1651,6 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev) | |||
1649 | } | 1651 | } |
1650 | } | 1652 | } |
1651 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); | 1653 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi); |
1652 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS400_200, quirk_disable_msi); | ||
1653 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_RS480, quirk_disable_msi); | ||
1654 | 1654 | ||
1655 | /* Go through the list of Hypertransport capabilities and | 1655 | /* Go through the list of Hypertransport capabilities and |
1656 | * return 1 if a HT MSI capability is found and enabled */ | 1656 | * return 1 if a HT MSI capability is found and enabled */ |
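As a rough illustration of the fixup pattern used in this hunk, a quirk that kills MSI machine-wide is just a final-stage fixup hook calling pci_no_msi(); the vendor/device pair below is made up, not a real chipset:

/* Sketch only: hypothetical IDs, same shape as quirk_disable_all_msi(). */
static void __init quirk_example_no_msi(struct pci_dev *dev)
{
	pci_no_msi();
	printk(KERN_WARNING "PCI: example quirk: MSI disabled globally.\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_no_msi);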
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index 948efc775a78..eb6abd3f9221 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c | |||
@@ -336,16 +336,21 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
336 | enable_irq_wake(board->det_pin); | 336 | enable_irq_wake(board->det_pin); |
337 | if (board->irq_pin) | 337 | if (board->irq_pin) |
338 | enable_irq_wake(board->irq_pin); | 338 | enable_irq_wake(board->irq_pin); |
339 | } else { | ||
340 | disable_irq_wake(board->det_pin); | ||
341 | if (board->irq_pin) | ||
342 | disable_irq_wake(board->irq_pin); | ||
343 | } | 339 | } |
344 | return 0; | 340 | return 0; |
345 | } | 341 | } |
346 | 342 | ||
347 | static int at91_cf_resume(struct platform_device *pdev) | 343 | static int at91_cf_resume(struct platform_device *pdev) |
348 | { | 344 | { |
345 | struct at91_cf_socket *cf = platform_get_drvdata(pdev); | ||
346 | struct at91_cf_data *board = cf->board; | ||
347 | |||
348 | if (device_may_wakeup(&pdev->dev)) { | ||
349 | disable_irq_wake(board->det_pin); | ||
350 | if (board->irq_pin) | ||
351 | disable_irq_wake(board->irq_pin); | ||
352 | } | ||
353 | |||
349 | pcmcia_socket_dev_resume(&pdev->dev); | 354 | pcmcia_socket_dev_resume(&pdev->dev); |
350 | return 0; | 355 | return 0; |
351 | } | 356 | } |
diff --git a/drivers/sbus/char/flash.c b/drivers/sbus/char/flash.c index 262f01e68592..44e039865aa9 100644 --- a/drivers/sbus/char/flash.c +++ b/drivers/sbus/char/flash.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/mm.h> | ||
17 | 18 | ||
18 | #include <asm/system.h> | 19 | #include <asm/system.h> |
19 | #include <asm/uaccess.h> | 20 | #include <asm/uaccess.h> |
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c index bd30aba242d0..731d7a5c5aa2 100644 --- a/drivers/video/neofb.c +++ b/drivers/video/neofb.c | |||
@@ -1286,34 +1286,36 @@ static int neofb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | |||
1286 | if (regno >= fb->cmap.len || regno > 255) | 1286 | if (regno >= fb->cmap.len || regno > 255) |
1287 | return -EINVAL; | 1287 | return -EINVAL; |
1288 | 1288 | ||
1289 | switch (fb->var.bits_per_pixel) { | 1289 | if (fb->var.bits_per_pixel <= 8) { |
1290 | case 8: | ||
1291 | outb(regno, 0x3c8); | 1290 | outb(regno, 0x3c8); |
1292 | 1291 | ||
1293 | outb(red >> 10, 0x3c9); | 1292 | outb(red >> 10, 0x3c9); |
1294 | outb(green >> 10, 0x3c9); | 1293 | outb(green >> 10, 0x3c9); |
1295 | outb(blue >> 10, 0x3c9); | 1294 | outb(blue >> 10, 0x3c9); |
1296 | break; | 1295 | } else if (regno < 16) { |
1297 | case 16: | 1296 | switch (fb->var.bits_per_pixel) { |
1298 | ((u32 *) fb->pseudo_palette)[regno] = | 1297 | case 16: |
1298 | ((u32 *) fb->pseudo_palette)[regno] = | ||
1299 | ((red & 0xf800)) | ((green & 0xfc00) >> 5) | | 1299 | ((red & 0xf800)) | ((green & 0xfc00) >> 5) | |
1300 | ((blue & 0xf800) >> 11); | 1300 | ((blue & 0xf800) >> 11); |
1301 | break; | 1301 | break; |
1302 | case 24: | 1302 | case 24: |
1303 | ((u32 *) fb->pseudo_palette)[regno] = | 1303 | ((u32 *) fb->pseudo_palette)[regno] = |
1304 | ((red & 0xff00) << 8) | ((green & 0xff00)) | | 1304 | ((red & 0xff00) << 8) | ((green & 0xff00)) | |
1305 | ((blue & 0xff00) >> 8); | 1305 | ((blue & 0xff00) >> 8); |
1306 | break; | 1306 | break; |
1307 | #ifdef NO_32BIT_SUPPORT_YET | 1307 | #ifdef NO_32BIT_SUPPORT_YET |
1308 | case 32: | 1308 | case 32: |
1309 | ((u32 *) fb->pseudo_palette)[regno] = | 1309 | ((u32 *) fb->pseudo_palette)[regno] = |
1310 | ((transp & 0xff00) << 16) | ((red & 0xff00) << 8) | | 1310 | ((transp & 0xff00) << 16) | ((red & 0xff00) << 8) | |
1311 | ((green & 0xff00)) | ((blue & 0xff00) >> 8); | 1311 | ((green & 0xff00)) | ((blue & 0xff00) >> 8); |
1312 | break; | 1312 | break; |
1313 | #endif | 1313 | #endif |
1314 | default: | 1314 | default: |
1315 | return 1; | 1315 | return 1; |
1316 | } | ||
1316 | } | 1317 | } |
1318 | |||
1317 | return 0; | 1319 | return 0; |
1318 | } | 1320 | } |
1319 | 1321 | ||
diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 2dac3ad2c44b..2c55dd94a1de 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <linux/rxrpc.h> | 17 | #include <linux/rxrpc.h> |
18 | #include <linux/key.h> | 18 | #include <linux/key.h> |
19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
20 | #include <linux/sched.h> | ||
21 | |||
20 | #include "afs.h" | 22 | #include "afs.h" |
21 | #include "afs_vl.h" | 23 | #include "afs_vl.h" |
22 | 24 | ||
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index 074791ce4ab2..b532a730cec2 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c | |||
@@ -140,7 +140,7 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na) | |||
140 | if (!ni->name) | 140 | if (!ni->name) |
141 | return -ENOMEM; | 141 | return -ENOMEM; |
142 | memcpy(ni->name, na->name, i); | 142 | memcpy(ni->name, na->name, i); |
143 | ni->name[i] = 0; | 143 | ni->name[na->name_len] = 0; |
144 | } | 144 | } |
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
diff --git a/include/asm-m68k/mmzone.h b/include/asm-m68k/mmzone.h new file mode 100644 index 000000000000..e1f1ec7b7006 --- /dev/null +++ b/include/asm-m68k/mmzone.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _ASM_M68K_MMZONE_H_ | ||
2 | #define _ASM_M68K_MMZONE_H_ | ||
3 | |||
4 | extern pg_data_t pg_data_map[]; | ||
5 | |||
6 | #define NODE_DATA(nid) (&pg_data_map[nid]) | ||
7 | #define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map) | ||
8 | |||
9 | #endif /* _ASM_M68K_MMZONE_H_ */ | ||
diff --git a/include/asm-m68k/module.h b/include/asm-m68k/module.h index c6d75af2d8d3..382d20a6fc18 100644 --- a/include/asm-m68k/module.h +++ b/include/asm-m68k/module.h | |||
@@ -1,7 +1,39 @@ | |||
1 | #ifndef _ASM_M68K_MODULE_H | 1 | #ifndef _ASM_M68K_MODULE_H |
2 | #define _ASM_M68K_MODULE_H | 2 | #define _ASM_M68K_MODULE_H |
3 | struct mod_arch_specific { }; | 3 | |
4 | struct mod_arch_specific { | ||
5 | struct m68k_fixup_info *fixup_start, *fixup_end; | ||
6 | }; | ||
7 | |||
8 | #define MODULE_ARCH_INIT { \ | ||
9 | .fixup_start = __start_fixup, \ | ||
10 | .fixup_end = __stop_fixup, \ | ||
11 | } | ||
12 | |||
4 | #define Elf_Shdr Elf32_Shdr | 13 | #define Elf_Shdr Elf32_Shdr |
5 | #define Elf_Sym Elf32_Sym | 14 | #define Elf_Sym Elf32_Sym |
6 | #define Elf_Ehdr Elf32_Ehdr | 15 | #define Elf_Ehdr Elf32_Ehdr |
16 | |||
17 | |||
18 | enum m68k_fixup_type { | ||
19 | m68k_fixup_memoffset, | ||
20 | m68k_fixup_vnode_shift, | ||
21 | }; | ||
22 | |||
23 | struct m68k_fixup_info { | ||
24 | enum m68k_fixup_type type; | ||
25 | void *addr; | ||
26 | }; | ||
27 | |||
28 | #define m68k_fixup(type, addr) \ | ||
29 | " .section \".m68k_fixup\",\"aw\"\n" \ | ||
30 | " .long " #type "," #addr "\n" \ | ||
31 | " .previous\n" | ||
32 | |||
33 | extern struct m68k_fixup_info __start_fixup[], __stop_fixup[]; | ||
34 | |||
35 | struct module; | ||
36 | extern void module_fixup(struct module *mod, struct m68k_fixup_info *start, | ||
37 | struct m68k_fixup_info *end); | ||
38 | |||
7 | #endif /* _ASM_M68K_MODULE_H */ | 39 | #endif /* _ASM_M68K_MODULE_H */ |
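The header above only declares the fixup records and module_fixup(); the code that consumes them lives in arch/m68k and is not part of this hunk. A sketch of what walking the table could look like, under the assumption that each record simply marks an immediate operand to patch:

/* Sketch only: apply the .m68k_fixup records gathered between
 * __start_fixup and __stop_fixup.  Exact patch offsets are assumptions. */
static void example_apply_fixups(struct m68k_fixup_info *start,
				 struct m68k_fixup_info *end)
{
	struct m68k_fixup_info *fixup;

	for (fixup = start; fixup < end; fixup++) {
		switch (fixup->type) {
		case m68k_fixup_memoffset:
			/* assumed: addr points at the 32-bit addl/subl
			 * immediate seen in the __pa()/__va() asm below */
			*(u32 *)fixup->addr = m68k_memoffset;
			break;
		case m68k_fixup_vnode_shift:
			/* the real code patches the moveq immediate near
			 * fixup->addr with m68k_virt_to_node_shift; the
			 * exact byte offset is omitted here */
			break;
		}
	}
}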
diff --git a/include/asm-m68k/motorola_pgtable.h b/include/asm-m68k/motorola_pgtable.h index 61e4406ed96a..b5b78c01eb6c 100644 --- a/include/asm-m68k/motorola_pgtable.h +++ b/include/asm-m68k/motorola_pgtable.h | |||
@@ -130,7 +130,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) | |||
130 | #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) | 130 | #define pte_present(pte) (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROTNONE)) |
131 | #define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) | 131 | #define pte_clear(mm,addr,ptep) ({ pte_val(*(ptep)) = 0; }) |
132 | 132 | ||
133 | #define pte_page(pte) (mem_map + ((unsigned long)(__va(pte_val(pte)) - PAGE_OFFSET) >> PAGE_SHIFT)) | 133 | #define pte_page(pte) virt_to_page(__va(pte_val(pte))) |
134 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) | 134 | #define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT) |
135 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) | 135 | #define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)) |
136 | 136 | ||
@@ -143,7 +143,7 @@ static inline void pgd_set(pgd_t *pgdp, pmd_t *pmdp) | |||
143 | while (--__i >= 0) \ | 143 | while (--__i >= 0) \ |
144 | *__ptr++ = 0; \ | 144 | *__ptr++ = 0; \ |
145 | }) | 145 | }) |
146 | #define pmd_page(pmd) (mem_map + ((unsigned long)(__va(pmd_val(pmd)) - PAGE_OFFSET) >> PAGE_SHIFT)) | 146 | #define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd))) |
147 | 147 | ||
148 | 148 | ||
149 | #define pgd_none(pgd) (!pgd_val(pgd)) | 149 | #define pgd_none(pgd) (!pgd_val(pgd)) |
@@ -223,10 +223,10 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmdp, unsigned long address) | |||
223 | return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); | 223 | return (pte_t *)__pmd_page(*pmdp) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)); |
224 | } | 224 | } |
225 | 225 | ||
226 | #define pte_offset_map(pmdp,address) ((pte_t *)kmap(pmd_page(*pmdp)) + ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) | 226 | #define pte_offset_map(pmdp,address) ((pte_t *)__pmd_page(*pmdp) + (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) |
227 | #define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) | 227 | #define pte_offset_map_nested(pmdp, address) pte_offset_map(pmdp, address) |
228 | #define pte_unmap(pte) kunmap(pte) | 228 | #define pte_unmap(pte) ((void)0) |
229 | #define pte_unmap_nested(pte) kunmap(pte) | 229 | #define pte_unmap_nested(pte) ((void)0) |
230 | 230 | ||
231 | /* | 231 | /* |
232 | * Allocate and free page tables. The xxx_kernel() versions are | 232 | * Allocate and free page tables. The xxx_kernel() versions are |
diff --git a/include/asm-m68k/page.h b/include/asm-m68k/page.h index fcc165ddd09e..9e6d0d6debdb 100644 --- a/include/asm-m68k/page.h +++ b/include/asm-m68k/page.h | |||
@@ -27,6 +27,8 @@ | |||
27 | 27 | ||
28 | #ifndef __ASSEMBLY__ | 28 | #ifndef __ASSEMBLY__ |
29 | 29 | ||
30 | #include <asm/module.h> | ||
31 | |||
30 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) | 32 | #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) |
31 | #define free_user_page(page, addr) free_page(addr) | 33 | #define free_user_page(page, addr) free_page(addr) |
32 | 34 | ||
@@ -114,18 +116,33 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
114 | 116 | ||
115 | #ifndef __ASSEMBLY__ | 117 | #ifndef __ASSEMBLY__ |
116 | 118 | ||
119 | extern unsigned long m68k_memoffset; | ||
120 | |||
117 | #ifndef CONFIG_SUN3 | 121 | #ifndef CONFIG_SUN3 |
118 | 122 | ||
119 | #define WANT_PAGE_VIRTUAL | 123 | #define WANT_PAGE_VIRTUAL |
120 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | ||
121 | extern unsigned long m68k_memoffset; | ||
122 | 124 | ||
123 | #define __pa(vaddr) ((unsigned long)(vaddr)+m68k_memoffset) | 125 | static inline unsigned long ___pa(void *vaddr) |
124 | #define __va(paddr) ((void *)((unsigned long)(paddr)-m68k_memoffset)) | 126 | { |
125 | #else | 127 | unsigned long paddr; |
126 | #define __pa(vaddr) virt_to_phys((void *)(vaddr)) | 128 | asm ( |
127 | #define __va(paddr) phys_to_virt((unsigned long)(paddr)) | 129 | "1: addl #0,%0\n" |
128 | #endif | 130 | m68k_fixup(%c2, 1b+2) |
131 | : "=r" (paddr) | ||
132 | : "0" (vaddr), "i" (m68k_fixup_memoffset)); | ||
133 | return paddr; | ||
134 | } | ||
135 | #define __pa(vaddr) ___pa((void *)(vaddr)) | ||
136 | static inline void *__va(unsigned long paddr) | ||
137 | { | ||
138 | void *vaddr; | ||
139 | asm ( | ||
140 | "1: subl #0,%0\n" | ||
141 | m68k_fixup(%c2, 1b+2) | ||
142 | : "=r" (vaddr) | ||
143 | : "0" (paddr), "i" (m68k_fixup_memoffset)); | ||
144 | return vaddr; | ||
145 | } | ||
129 | 146 | ||
130 | #else /* !CONFIG_SUN3 */ | 147 | #else /* !CONFIG_SUN3 */ |
131 | /* This #define is a horrible hack to suppress lots of warnings. --m */ | 148 | /* This #define is a horrible hack to suppress lots of warnings. --m */ |
@@ -161,11 +178,47 @@ static inline void *__va(unsigned long x) | |||
161 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) | 178 | #define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT) |
162 | #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) | 179 | #define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT) |
163 | 180 | ||
164 | #define virt_to_page(kaddr) (mem_map + (((unsigned long)(kaddr)-PAGE_OFFSET) >> PAGE_SHIFT)) | 181 | extern int m68k_virt_to_node_shift; |
165 | #define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) | 182 | |
183 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | ||
184 | #define __virt_to_node(addr) (&pg_data_map[0]) | ||
185 | #else | ||
186 | extern struct pglist_data *pg_data_table[]; | ||
187 | |||
188 | static inline __attribute_const__ int __virt_to_node_shift(void) | ||
189 | { | ||
190 | int shift; | ||
191 | |||
192 | asm ( | ||
193 | "1: moveq #0,%0\n" | ||
194 | m68k_fixup(%c1, 1b) | ||
195 | : "=d" (shift) | ||
196 | : "i" (m68k_fixup_vnode_shift)); | ||
197 | return shift; | ||
198 | } | ||
199 | |||
200 | #define __virt_to_node(addr) (pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()]) | ||
201 | #endif | ||
166 | 202 | ||
167 | #define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) | 203 | #define virt_to_page(addr) ({ \ |
168 | #define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) | 204 | pfn_to_page(virt_to_pfn(addr)); \ |
205 | }) | ||
206 | #define page_to_virt(page) ({ \ | ||
207 | pfn_to_virt(page_to_pfn(page)); \ | ||
208 | }) | ||
209 | |||
210 | #define pfn_to_page(pfn) ({ \ | ||
211 | unsigned long __pfn = (pfn); \ | ||
212 | struct pglist_data *pgdat; \ | ||
213 | pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn)); \ | ||
214 | pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn); \ | ||
215 | }) | ||
216 | #define page_to_pfn(_page) ({ \ | ||
217 | struct page *__p = (_page); \ | ||
218 | struct pglist_data *pgdat; \ | ||
219 | pgdat = &pg_data_map[page_to_nid(__p)]; \ | ||
220 | ((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn; \ | ||
221 | }) | ||
169 | 222 | ||
170 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory) | 223 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory) |
171 | #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) | 224 | #define pfn_valid(pfn) virt_addr_valid(pfn_to_virt(pfn)) |
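These macros pair with the run-time-patched __pa()/__va() above: the addl/subl immediates and the moveq shift are fixed up at boot via the .m68k_fixup section from module.h. Since pfn_to_page()/page_to_pfn() are statement-expression macros, the same logic reads more easily as plain functions (illustrative restatement only, no behavioural change):

/* pfn_to_page(): find the node owning the pfn, then index its mem_map. */
static inline struct page *sketch_pfn_to_page(unsigned long pfn)
{
	struct pglist_data *pgdat;

	pgdat = __virt_to_node((unsigned long)pfn_to_virt(pfn));
	return pgdat->node_mem_map + (pfn - pgdat->node_start_pfn);
}

/* page_to_pfn(): recover the node from the page, then undo the offset. */
static inline unsigned long sketch_page_to_pfn(struct page *page)
{
	struct pglist_data *pgdat = &pg_data_map[page_to_nid(page)];

	return (page - pgdat->node_mem_map) + pgdat->node_start_pfn;
}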
diff --git a/include/asm-m68k/pgalloc.h b/include/asm-m68k/pgalloc.h index a9cfb4b99d88..4cb1a57ab763 100644 --- a/include/asm-m68k/pgalloc.h +++ b/include/asm-m68k/pgalloc.h | |||
@@ -8,11 +8,12 @@ | |||
8 | #include <asm/virtconvert.h> | 8 | #include <asm/virtconvert.h> |
9 | 9 | ||
10 | 10 | ||
11 | |||
12 | #ifdef CONFIG_SUN3 | 11 | #ifdef CONFIG_SUN3 |
13 | #include <asm/sun3_pgalloc.h> | 12 | #include <asm/sun3_pgalloc.h> |
14 | #else | 13 | #else |
15 | #include <asm/motorola_pgalloc.h> | 14 | #include <asm/motorola_pgalloc.h> |
16 | #endif | 15 | #endif |
17 | 16 | ||
17 | extern void m68k_setup_node(int node); | ||
18 | |||
18 | #endif /* M68K_PGALLOC_H */ | 19 | #endif /* M68K_PGALLOC_H */ |
diff --git a/include/asm-m68k/pgtable.h b/include/asm-m68k/pgtable.h index 555b87a1f7e3..778a4c538eb2 100644 --- a/include/asm-m68k/pgtable.h +++ b/include/asm-m68k/pgtable.h | |||
@@ -107,22 +107,7 @@ extern void *empty_zero_page; | |||
107 | /* 64-bit machines, beware! SRB. */ | 107 | /* 64-bit machines, beware! SRB. */ |
108 | #define SIZEOF_PTR_LOG2 2 | 108 | #define SIZEOF_PTR_LOG2 2 |
109 | 109 | ||
110 | /* | 110 | #define mm_end_of_chunk(addr, len) 0 |
111 | * Check if the addr/len goes up to the end of a physical | ||
112 | * memory chunk. Used for DMA functions. | ||
113 | */ | ||
114 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | ||
115 | /* | ||
116 | * It makes no sense to consider whether we cross a memory boundary if | ||
117 | * we support just one physical chunk of memory. | ||
118 | */ | ||
119 | static inline int mm_end_of_chunk(unsigned long addr, int len) | ||
120 | { | ||
121 | return 0; | ||
122 | } | ||
123 | #else | ||
124 | int mm_end_of_chunk (unsigned long addr, int len); | ||
125 | #endif | ||
126 | 111 | ||
127 | extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); | 112 | extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode); |
128 | 113 | ||
diff --git a/include/asm-m68k/sun3_pgtable.h b/include/asm-m68k/sun3_pgtable.h index 5156a28a18d8..b9e62c1e7ae3 100644 --- a/include/asm-m68k/sun3_pgtable.h +++ b/include/asm-m68k/sun3_pgtable.h | |||
@@ -132,8 +132,8 @@ static inline void pte_clear (struct mm_struct *mm, unsigned long addr, pte_t *p | |||
132 | #define pfn_pte(pfn, pgprot) \ | 132 | #define pfn_pte(pfn, pgprot) \ |
133 | ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; }) | 133 | ({ pte_t __pte; pte_val(__pte) = pfn | pgprot_val(pgprot); __pte; }) |
134 | 134 | ||
135 | #define pte_page(pte) (mem_map+((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT)) | 135 | #define pte_page(pte) virt_to_page(__pte_page(pte)) |
136 | #define pmd_page(pmd) (mem_map+((__pmd_page(pmd) - PAGE_OFFSET) >> PAGE_SHIFT)) | 136 | #define pmd_page(pmd) virt_to_page(__pmd_page(pmd)) |
137 | 137 | ||
138 | 138 | ||
139 | static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); } | 139 | static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); } |
diff --git a/include/asm-m68k/virtconvert.h b/include/asm-m68k/virtconvert.h index 83a87c9b1a16..dea32fbc7e51 100644 --- a/include/asm-m68k/virtconvert.h +++ b/include/asm-m68k/virtconvert.h | |||
@@ -8,56 +8,35 @@ | |||
8 | #ifdef __KERNEL__ | 8 | #ifdef __KERNEL__ |
9 | 9 | ||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/mmzone.h> | ||
11 | #include <asm/setup.h> | 12 | #include <asm/setup.h> |
12 | #include <asm/page.h> | 13 | #include <asm/page.h> |
13 | 14 | ||
14 | #ifdef CONFIG_AMIGA | ||
15 | #include <asm/amigahw.h> | ||
16 | #endif | ||
17 | |||
18 | /* | 15 | /* |
19 | * Change virtual addresses to physical addresses and vv. | 16 | * Change virtual addresses to physical addresses and vv. |
20 | */ | 17 | */ |
21 | #ifndef CONFIG_SUN3 | ||
22 | extern unsigned long mm_vtop(unsigned long addr) __attribute_const__; | ||
23 | extern unsigned long mm_ptov(unsigned long addr) __attribute_const__; | ||
24 | #else | ||
25 | static inline unsigned long mm_vtop(unsigned long vaddr) | ||
26 | { | ||
27 | return __pa(vaddr); | ||
28 | } | ||
29 | |||
30 | static inline unsigned long mm_ptov(unsigned long paddr) | ||
31 | { | ||
32 | return (unsigned long)__va(paddr); | ||
33 | } | ||
34 | #endif | ||
35 | |||
36 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK | ||
37 | static inline unsigned long virt_to_phys(void *vaddr) | ||
38 | { | ||
39 | return (unsigned long)vaddr - PAGE_OFFSET + m68k_memory[0].addr; | ||
40 | } | ||
41 | |||
42 | static inline void * phys_to_virt(unsigned long paddr) | ||
43 | { | ||
44 | return (void *)(paddr - m68k_memory[0].addr + PAGE_OFFSET); | ||
45 | } | ||
46 | #else | ||
47 | static inline unsigned long virt_to_phys(void *address) | 18 | static inline unsigned long virt_to_phys(void *address) |
48 | { | 19 | { |
49 | return mm_vtop((unsigned long)address); | 20 | return __pa(address); |
50 | } | 21 | } |
51 | 22 | ||
52 | static inline void *phys_to_virt(unsigned long address) | 23 | static inline void *phys_to_virt(unsigned long address) |
53 | { | 24 | { |
54 | return (void *) mm_ptov(address); | 25 | return __va(address); |
55 | } | 26 | } |
56 | #endif | ||
57 | 27 | ||
58 | /* Permanent address of a page. */ | 28 | /* Permanent address of a page. */ |
59 | #define __page_address(page) (PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT)) | 29 | #ifdef CONFIG_SINGLE_MEMORY_CHUNK |
60 | #define page_to_phys(page) virt_to_phys((void *)__page_address(page)) | 30 | #define page_to_phys(page) \ |
31 | __pa(PAGE_OFFSET + (((page) - pg_data_map[0].node_mem_map) << PAGE_SHIFT)) | ||
32 | #else | ||
33 | #define page_to_phys(_page) ({ \ | ||
34 | struct page *__page = _page; \ | ||
35 | struct pglist_data *pgdat; \ | ||
36 | pgdat = pg_data_table[page_to_nid(__page)]; \ | ||
37 | page_to_pfn(__page) << PAGE_SHIFT; \ | ||
38 | }) | ||
39 | #endif | ||
61 | 40 | ||
62 | /* | 41 | /* |
63 | * IO bus memory addresses are 1:1 with the physical address, | 42 | * IO bus memory addresses are 1:1 with the physical address, |
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h index 5cdb1ff04838..4a43075a0619 100644 --- a/include/asm-sparc64/hypervisor.h +++ b/include/asm-sparc64/hypervisor.h | |||
@@ -1097,6 +1097,80 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state, | |||
1097 | */ | 1097 | */ |
1098 | #define HV_FAST_MACH_GET_SOFT_STATE 0x71 | 1098 | #define HV_FAST_MACH_GET_SOFT_STATE 0x71 |
1099 | 1099 | ||
1100 | /* svc_send() | ||
1101 | * TRAP: HV_FAST_TRAP | ||
1102 | * FUNCTION: HV_FAST_SVC_SEND | ||
1103 | * ARG0: service ID | ||
1104 | * ARG1: buffer real address | ||
1105 | * ARG2: buffer size | ||
1106 | * RET0: STATUS | ||
1107 | * RET1: sent_bytes | ||
1108 | * | ||
1109 | * Be careful, all output registers are clobbered by this operation, | ||
1110 | * so for example it is not possible to save away a value in %o4 | ||
1111 | * across the trap. | ||
1112 | */ | ||
1113 | #define HV_FAST_SVC_SEND 0x80 | ||
1114 | |||
1115 | /* svc_recv() | ||
1116 | * TRAP: HV_FAST_TRAP | ||
1117 | * FUNCTION: HV_FAST_SVC_RECV | ||
1118 | * ARG0: service ID | ||
1119 | * ARG1: buffer real address | ||
1120 | * ARG2: buffer size | ||
1121 | * RET0: STATUS | ||
1122 | * RET1: recv_bytes | ||
1123 | * | ||
1124 | * Be careful, all output registers are clobbered by this operation, | ||
1125 | * so for example it is not possible to save away a value in %o4 | ||
1126 | * across the trap. | ||
1127 | */ | ||
1128 | #define HV_FAST_SVC_RECV 0x81 | ||
1129 | |||
1130 | /* svc_getstatus() | ||
1131 | * TRAP: HV_FAST_TRAP | ||
1132 | * FUNCTION: HV_FAST_SVC_GETSTATUS | ||
1133 | * ARG0: service ID | ||
1134 | * RET0: STATUS | ||
1135 | * RET1: status bits | ||
1136 | */ | ||
1137 | #define HV_FAST_SVC_GETSTATUS 0x82 | ||
1138 | |||
1139 | /* svc_setstatus() | ||
1140 | * TRAP: HV_FAST_TRAP | ||
1141 | * FUNCTION: HV_FAST_SVC_SETSTATUS | ||
1142 | * ARG0: service ID | ||
1143 | * ARG1: bits to set | ||
1144 | * RET0: STATUS | ||
1145 | */ | ||
1146 | #define HV_FAST_SVC_SETSTATUS 0x83 | ||
1147 | |||
1148 | /* svc_clrstatus() | ||
1149 | * TRAP: HV_FAST_TRAP | ||
1150 | * FUNCTION: HV_FAST_SVC_CLRSTATUS | ||
1151 | * ARG0: service ID | ||
1152 | * ARG1: bits to clear | ||
1153 | * RET0: STATUS | ||
1154 | */ | ||
1155 | #define HV_FAST_SVC_CLRSTATUS 0x84 | ||
1156 | |||
1157 | #ifndef __ASSEMBLY__ | ||
1158 | extern unsigned long sun4v_svc_send(unsigned long svc_id, | ||
1159 | unsigned long buffer, | ||
1160 | unsigned long buffer_size, | ||
1161 | unsigned long *sent_bytes); | ||
1162 | extern unsigned long sun4v_svc_recv(unsigned long svc_id, | ||
1163 | unsigned long buffer, | ||
1164 | unsigned long buffer_size, | ||
1165 | unsigned long *recv_bytes); | ||
1166 | extern unsigned long sun4v_svc_getstatus(unsigned long svc_id, | ||
1167 | unsigned long *status_bits); | ||
1168 | extern unsigned long sun4v_svc_setstatus(unsigned long svc_id, | ||
1169 | unsigned long status_bits); | ||
1170 | extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id, | ||
1171 | unsigned long status_bits); | ||
1172 | #endif | ||
1173 | |||
1100 | /* Trap trace services. | 1174 | /* Trap trace services. |
1101 | * | 1175 | * |
1102 | * The hypervisor provides a trap tracing capability for privileged | 1176 | * The hypervisor provides a trap tracing capability for privileged |
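The comment blocks above fix the calling convention for the new domain-service hypercalls: service ID, buffer real address and size go in, the transferred byte count comes back in RET1. A minimal usage sketch, assuming the buffer is in the kernel linear mapping (so __pa() yields its real address) and that HV_EOK signals success as elsewhere in this header:

/* Sketch only: push 'len' bytes down a service channel and return how
 * many the hypervisor actually accepted, or -EIO on failure. */
static int example_svc_write(unsigned long svc_id, void *buf,
			     unsigned long len)
{
	unsigned long sent, hv_err;

	hv_err = sun4v_svc_send(svc_id, __pa(buf), len, &sent);
	if (hv_err != HV_EOK)
		return -EIO;

	return sent;
}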
@@ -2724,6 +2798,100 @@ struct hv_mmu_statistics { | |||
2724 | */ | 2798 | */ |
2725 | #define HV_FAST_MMUSTAT_INFO 0x103 | 2799 | #define HV_FAST_MMUSTAT_INFO 0x103 |
2726 | 2800 | ||
2801 | /* NCS crypto services */ | ||
2802 | |||
2803 | /* ncs_request() sub-function numbers */ | ||
2804 | #define HV_NCS_QCONF 0x01 | ||
2805 | #define HV_NCS_QTAIL_UPDATE 0x02 | ||
2806 | |||
2807 | #ifndef __ASSEMBLY__ | ||
2808 | struct hv_ncs_queue_entry { | ||
2809 | /* MAU Control Register */ | ||
2810 | unsigned long mau_control; | ||
2811 | #define MAU_CONTROL_INV_PARITY 0x0000000000002000 | ||
2812 | #define MAU_CONTROL_STRAND 0x0000000000001800 | ||
2813 | #define MAU_CONTROL_BUSY 0x0000000000000400 | ||
2814 | #define MAU_CONTROL_INT 0x0000000000000200 | ||
2815 | #define MAU_CONTROL_OP 0x00000000000001c0 | ||
2816 | #define MAU_CONTROL_OP_SHIFT 6 | ||
2817 | #define MAU_OP_LOAD_MA_MEMORY 0x0 | ||
2818 | #define MAU_OP_STORE_MA_MEMORY 0x1 | ||
2819 | #define MAU_OP_MODULAR_MULT 0x2 | ||
2820 | #define MAU_OP_MODULAR_REDUCE 0x3 | ||
2821 | #define MAU_OP_MODULAR_EXP_LOOP 0x4 | ||
2822 | #define MAU_CONTROL_LEN 0x000000000000003f | ||
2823 | #define MAU_CONTROL_LEN_SHIFT 0 | ||
2824 | |||
2825 | /* Real address of the bytes to load into or store | ||
2826 | * out of the MAU. | ||
2827 | */ | ||
2828 | unsigned long mau_mpa; | ||
2829 | |||
2830 | /* Modular Arithmetic MA Offset Register. */ | ||
2831 | unsigned long mau_ma; | ||
2832 | |||
2833 | /* Modular Arithmetic N Prime Register. */ | ||
2834 | unsigned long mau_np; | ||
2835 | }; | ||
2836 | |||
2837 | struct hv_ncs_qconf_arg { | ||
2838 | unsigned long mid; /* MAU ID, 1 per core on Niagara */ | ||
2839 | unsigned long base; /* Real address base of queue */ | ||
2840 | unsigned long end; /* Real address end of queue */ | ||
2841 | unsigned long num_ents; /* Number of entries in queue */ | ||
2842 | }; | ||
2843 | |||
2844 | struct hv_ncs_qtail_update_arg { | ||
2845 | unsigned long mid; /* MAU ID, 1 per core on Niagara */ | ||
2846 | unsigned long tail; /* New tail index to use */ | ||
2847 | unsigned long syncflag; /* only SYNCFLAG_SYNC is implemented */ | ||
2848 | #define HV_NCS_SYNCFLAG_SYNC 0x00 | ||
2849 | #define HV_NCS_SYNCFLAG_ASYNC 0x01 | ||
2850 | }; | ||
2851 | #endif | ||
2852 | |||
2853 | /* ncs_request() | ||
2854 | * TRAP: HV_FAST_TRAP | ||
2855 | * FUNCTION: HV_FAST_NCS_REQUEST | ||
2856 | * ARG0: NCS sub-function | ||
2857 | * ARG1: sub-function argument real address | ||
2858 | * ARG2: size in bytes of sub-function argument | ||
2859 | * RET0: status | ||
2860 | * | ||
2861 | * The MAU chip of the Niagara processor is not directly accessible | ||
2862 | * to privileged code; instead it is programmed indirectly via this | ||
2863 | * hypervisor API. | ||
2864 | * | ||
2865 | * The interface defines a queue of MAU operations to perform. | ||
2866 | * Privileged code registers a queue with the hypervisor by invoking | ||
2867 | * this HVAPI with the HV_NCS_QCONF sub-function, which defines the | ||
2868 | * base, end, and number of entries of the queue. Each queue entry | ||
2869 | * contains a MAU register struct block. | ||
2870 | * | ||
2871 | * The privileged code then proceeds to add entries to the queue and | ||
2872 | * then invoke the HV_NCS_QTAIL_UPDATE sub-function. Since only | ||
2873 | * synchronous operations are supported by the current hypervisor, | ||
2874 | * HV_NCS_QTAIL_UPDATE will run all the pending queue entries to | ||
2875 | * completion and return HV_EOK, or return an error code. | ||
2876 | * | ||
2877 | * The real address of the sub-function argument must be aligned on at | ||
2878 | * least an 8-byte boundary. | ||
2879 | * | ||
2880 | * The tail argument of HV_NCS_QTAIL_UPDATE is an index, not a byte | ||
2881 | * offset, into the queue and must be less than or equal to the 'num_ents' | ||
2882 | * argument given in the HV_NCS_QCONF call. | ||
2883 | */ | ||
2884 | #define HV_FAST_NCS_REQUEST 0x110 | ||
2885 | |||
2886 | #ifndef __ASSEMBLY__ | ||
2887 | extern unsigned long sun4v_ncs_request(unsigned long request, | ||
2888 | unsigned long arg_ra, | ||
2889 | unsigned long arg_size); | ||
2890 | #endif | ||
2891 | |||
2892 | #define HV_FAST_FIRE_GET_PERFREG 0x120 | ||
2893 | #define HV_FAST_FIRE_SET_PERFREG 0x121 | ||
2894 | |||
2727 | /* Function numbers for HV_CORE_TRAP. */ | 2895 | /* Function numbers for HV_CORE_TRAP. */ |
2728 | #define HV_CORE_SET_VER 0x00 | 2896 | #define HV_CORE_SET_VER 0x00 |
2729 | #define HV_CORE_PUTCHAR 0x01 | 2897 | #define HV_CORE_PUTCHAR 0x01 |
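Likewise, the ncs_request() description above implies a two-step flow: register the MAU work queue with HV_NCS_QCONF, then advance its tail with HV_NCS_QTAIL_UPDATE. A sketch of the registration step, assuming q_ra is the real address of an already-populated array of hv_ncs_queue_entry records:

/* Sketch only: the argument block must sit at an 8-byte aligned real
 * address; a stack struct of unsigned longs satisfies that here. */
static unsigned long example_ncs_qconf(unsigned long mau_id,
				       unsigned long q_ra,
				       unsigned long num_ents)
{
	struct hv_ncs_qconf_arg arg;

	arg.mid = mau_id;
	arg.base = q_ra;
	arg.end = q_ra + num_ents * sizeof(struct hv_ncs_queue_entry);
	arg.num_ents = num_ents;

	return sun4v_ncs_request(HV_NCS_QCONF, __pa(&arg), sizeof(arg));
}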
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 09ea01a8a99c..648bd1f0912d 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -209,9 +209,8 @@ enum { | |||
209 | DEVCONF_RTR_PROBE_INTERVAL, | 209 | DEVCONF_RTR_PROBE_INTERVAL, |
210 | DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN, | 210 | DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN, |
211 | DEVCONF_PROXY_NDP, | 211 | DEVCONF_PROXY_NDP, |
212 | __DEVCONF_OPTIMISTIC_DAD, | ||
213 | DEVCONF_ACCEPT_SOURCE_ROUTE, | ||
214 | DEVCONF_OPTIMISTIC_DAD, | 212 | DEVCONF_OPTIMISTIC_DAD, |
213 | DEVCONF_ACCEPT_SOURCE_ROUTE, | ||
215 | DEVCONF_MAX | 214 | DEVCONF_MAX |
216 | }; | 215 | }; |
217 | 216 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f671cd2f133f..3a70f553b28f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -910,6 +910,17 @@ static inline int netif_rx_reschedule(struct net_device *dev, int undo) | |||
910 | return 0; | 910 | return 0; |
911 | } | 911 | } |
912 | 912 | ||
913 | /* same as netif_rx_complete, except that local_irq_save(flags) | ||
914 | * has already been issued | ||
915 | */ | ||
916 | static inline void __netif_rx_complete(struct net_device *dev) | ||
917 | { | ||
918 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | ||
919 | list_del(&dev->poll_list); | ||
920 | smp_mb__before_clear_bit(); | ||
921 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
922 | } | ||
923 | |||
913 | /* Remove interface from poll list: it must be in the poll list | 924 | /* Remove interface from poll list: it must be in the poll list |
914 | * on current cpu. This primitive is called by dev->poll(), when | 925 | * on current cpu. This primitive is called by dev->poll(), when |
915 | * it completes the work. The device cannot be out of poll list at this | 926 | * it completes the work. The device cannot be out of poll list at this |
@@ -920,10 +931,7 @@ static inline void netif_rx_complete(struct net_device *dev) | |||
920 | unsigned long flags; | 931 | unsigned long flags; |
921 | 932 | ||
922 | local_irq_save(flags); | 933 | local_irq_save(flags); |
923 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | 934 | __netif_rx_complete(dev); |
924 | list_del(&dev->poll_list); | ||
925 | smp_mb__before_clear_bit(); | ||
926 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
927 | local_irq_restore(flags); | 935 | local_irq_restore(flags); |
928 | } | 936 | } |
929 | 937 | ||
@@ -940,17 +948,6 @@ static inline void netif_poll_enable(struct net_device *dev) | |||
940 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | 948 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); |
941 | } | 949 | } |
942 | 950 | ||
943 | /* same as netif_rx_complete, except that local_irq_save(flags) | ||
944 | * has already been issued | ||
945 | */ | ||
946 | static inline void __netif_rx_complete(struct net_device *dev) | ||
947 | { | ||
948 | BUG_ON(!test_bit(__LINK_STATE_RX_SCHED, &dev->state)); | ||
949 | list_del(&dev->poll_list); | ||
950 | smp_mb__before_clear_bit(); | ||
951 | clear_bit(__LINK_STATE_RX_SCHED, &dev->state); | ||
952 | } | ||
953 | |||
954 | static inline void netif_tx_lock(struct net_device *dev) | 951 | static inline void netif_tx_lock(struct net_device *dev) |
955 | { | 952 | { |
956 | spin_lock(&dev->_xmit_lock); | 953 | spin_lock(&dev->_xmit_lock); |
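Moving __netif_rx_complete() above netif_rx_complete() lets the latter reuse it; the __ variant exists for callers that already run with interrupts disabled, typically under a driver lock. A sketch of a poll handler of this API generation using it (the private struct and the elided RX loop are placeholders):

struct example_priv {
	spinlock_t lock;
	/* ... rx ring state ... */
};

/* Sketch only: complete polling under the driver's own IRQ-disabling
 * lock instead of taking a second save/restore in netif_rx_complete(). */
static int example_poll(struct net_device *dev, int *budget)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int done = 1;	/* cleared by the (elided) RX loop if work remains */

	/* ... receive packets here, decrementing *budget and dev->quota ... */

	if (done) {
		spin_lock_irqsave(&priv->lock, flags);
		__netif_rx_complete(dev);
		/* a real driver would re-enable its RX interrupt here,
		 * still under the lock, to close the race with the ISR */
		spin_unlock_irqrestore(&priv->lock, flags);
		return 0;	/* removed from the poll list */
	}
	return 1;		/* keep polling */
}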
diff --git a/include/net/sock.h b/include/net/sock.h index 689b886038da..dfeb8b13024f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -218,13 +218,13 @@ struct sock { | |||
218 | atomic_t sk_rmem_alloc; | 218 | atomic_t sk_rmem_alloc; |
219 | atomic_t sk_wmem_alloc; | 219 | atomic_t sk_wmem_alloc; |
220 | atomic_t sk_omem_alloc; | 220 | atomic_t sk_omem_alloc; |
221 | int sk_sndbuf; | ||
221 | struct sk_buff_head sk_receive_queue; | 222 | struct sk_buff_head sk_receive_queue; |
222 | struct sk_buff_head sk_write_queue; | 223 | struct sk_buff_head sk_write_queue; |
223 | struct sk_buff_head sk_async_wait_queue; | 224 | struct sk_buff_head sk_async_wait_queue; |
224 | int sk_wmem_queued; | 225 | int sk_wmem_queued; |
225 | int sk_forward_alloc; | 226 | int sk_forward_alloc; |
226 | gfp_t sk_allocation; | 227 | gfp_t sk_allocation; |
227 | int sk_sndbuf; | ||
228 | int sk_route_caps; | 228 | int sk_route_caps; |
229 | int sk_gso_type; | 229 | int sk_gso_type; |
230 | int sk_rcvlowat; | 230 | int sk_rcvlowat; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index e22b4f0305a3..a8af9ae00177 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -254,6 +254,12 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) | |||
254 | return seq3 - seq2 >= seq1 - seq2; | 254 | return seq3 - seq2 >= seq1 - seq2; |
255 | } | 255 | } |
256 | 256 | ||
257 | static inline int tcp_too_many_orphans(struct sock *sk, int num) | ||
258 | { | ||
259 | return (num > sysctl_tcp_max_orphans) || | ||
260 | (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | ||
261 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]); | ||
262 | } | ||
257 | 263 | ||
258 | extern struct proto tcp_prot; | 264 | extern struct proto tcp_prot; |
259 | 265 | ||
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 39ef925d39dd..90185e8b335e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -237,7 +237,6 @@ extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo); | |||
237 | extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); | 237 | extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo); |
238 | extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c); | 238 | extern void km_policy_notify(struct xfrm_policy *xp, int dir, struct km_event *c); |
239 | extern void km_state_notify(struct xfrm_state *x, struct km_event *c); | 239 | extern void km_state_notify(struct xfrm_state *x, struct km_event *c); |
240 | #define XFRM_ACQ_EXPIRES 30 | ||
241 | 240 | ||
242 | struct xfrm_tmpl; | 241 | struct xfrm_tmpl; |
243 | extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); | 242 | extern int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d8970623c566..bd8e33582d25 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2689,7 +2689,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat) | |||
2689 | map = alloc_bootmem_node(pgdat, size); | 2689 | map = alloc_bootmem_node(pgdat, size); |
2690 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); | 2690 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
2691 | } | 2691 | } |
2692 | #ifdef CONFIG_FLATMEM | 2692 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
2693 | /* | 2693 | /* |
2694 | * With no DISCONTIG, the global mem_map is just set as node 0's | 2694 | * With no DISCONTIG, the global mem_map is just set as node 0's |
2695 | */ | 2695 | */ |
@@ -2435,6 +2435,7 @@ void __init kmem_cache_init(void) | |||
2435 | */ | 2435 | */ |
2436 | create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", | 2436 | create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", |
2437 | sizeof(struct kmem_cache_node), GFP_KERNEL); | 2437 | sizeof(struct kmem_cache_node), GFP_KERNEL); |
2438 | kmalloc_caches[0].refcount = -1; | ||
2438 | #endif | 2439 | #endif |
2439 | 2440 | ||
2440 | /* Able to allocate the per node structures */ | 2441 | /* Able to allocate the per node structures */ |
@@ -2482,6 +2483,12 @@ static int slab_unmergeable(struct kmem_cache *s) | |||
2482 | if (s->ctor) | 2483 | if (s->ctor) |
2483 | return 1; | 2484 | return 1; |
2484 | 2485 | ||
2486 | /* | ||
2487 | * We may have set a slab to be unmergeable during bootstrap. | ||
2488 | */ | ||
2489 | if (s->refcount < 0) | ||
2490 | return 1; | ||
2491 | |||
2485 | return 0; | 2492 | return 0; |
2486 | } | 2493 | } |
2487 | 2494 | ||
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index 91b017016d5b..3fc697293819 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c | |||
@@ -121,6 +121,7 @@ void br_fdb_cleanup(unsigned long _data) | |||
121 | { | 121 | { |
122 | struct net_bridge *br = (struct net_bridge *)_data; | 122 | struct net_bridge *br = (struct net_bridge *)_data; |
123 | unsigned long delay = hold_time(br); | 123 | unsigned long delay = hold_time(br); |
124 | unsigned long next_timer = jiffies + br->forward_delay; | ||
124 | int i; | 125 | int i; |
125 | 126 | ||
126 | spin_lock_bh(&br->hash_lock); | 127 | spin_lock_bh(&br->hash_lock); |
@@ -129,14 +130,21 @@ void br_fdb_cleanup(unsigned long _data) | |||
129 | struct hlist_node *h, *n; | 130 | struct hlist_node *h, *n; |
130 | 131 | ||
131 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { | 132 | hlist_for_each_entry_safe(f, h, n, &br->hash[i], hlist) { |
132 | if (!f->is_static && | 133 | unsigned long this_timer; |
133 | time_before_eq(f->ageing_timer + delay, jiffies)) | 134 | if (f->is_static) |
135 | continue; | ||
136 | this_timer = f->ageing_timer + delay; | ||
137 | if (time_before_eq(this_timer, jiffies)) | ||
134 | fdb_delete(f); | 138 | fdb_delete(f); |
139 | else if (this_timer < next_timer) | ||
140 | next_timer = this_timer; | ||
135 | } | 141 | } |
136 | } | 142 | } |
137 | spin_unlock_bh(&br->hash_lock); | 143 | spin_unlock_bh(&br->hash_lock); |
138 | 144 | ||
139 | mod_timer(&br->gc_timer, jiffies + HZ/10); | 145 | /* Add HZ/4 to ensure we round the jiffies upwards to be after the next |
146 | * timer, otherwise we might round down and end up with a no-op run. */ | ||
147 | mod_timer(&br->gc_timer, round_jiffies(next_timer + HZ/4)); | ||
140 | } | 148 | } |
141 | 149 | ||
142 | /* Completely flush all dynamic entries in forwarding database.*/ | 150 | /* Completely flush all dynamic entries in forwarding database.*/ |
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index 0e035d6162cc..e38034aa56f5 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c | |||
@@ -178,7 +178,8 @@ void br_transmit_config(struct net_bridge_port *p) | |||
178 | br_send_config_bpdu(p, &bpdu); | 178 | br_send_config_bpdu(p, &bpdu); |
179 | p->topology_change_ack = 0; | 179 | p->topology_change_ack = 0; |
180 | p->config_pending = 0; | 180 | p->config_pending = 0; |
181 | mod_timer(&p->hold_timer, jiffies + BR_HOLD_TIME); | 181 | mod_timer(&p->hold_timer, |
182 | round_jiffies(jiffies + BR_HOLD_TIME)); | ||
182 | } | 183 | } |
183 | } | 184 | } |
184 | 185 | ||
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 24e0ca4a3131..77f5255e6915 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c | |||
@@ -42,7 +42,7 @@ static void br_hello_timer_expired(unsigned long arg) | |||
42 | if (br->dev->flags & IFF_UP) { | 42 | if (br->dev->flags & IFF_UP) { |
43 | br_config_bpdu_generation(br); | 43 | br_config_bpdu_generation(br); |
44 | 44 | ||
45 | mod_timer(&br->hello_timer, jiffies + br->hello_time); | 45 | mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); |
46 | } | 46 | } |
47 | spin_unlock(&br->lock); | 47 | spin_unlock(&br->lock); |
48 | } | 48 | } |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index f34aca041a25..6d5ea9762040 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -25,6 +25,7 @@ extern int sysctl_core_destroy_delay; | |||
25 | extern u32 sysctl_xfrm_aevent_etime; | 25 | extern u32 sysctl_xfrm_aevent_etime; |
26 | extern u32 sysctl_xfrm_aevent_rseqth; | 26 | extern u32 sysctl_xfrm_aevent_rseqth; |
27 | extern int sysctl_xfrm_larval_drop; | 27 | extern int sysctl_xfrm_larval_drop; |
28 | extern u32 sysctl_xfrm_acq_expires; | ||
28 | #endif | 29 | #endif |
29 | 30 | ||
30 | ctl_table core_table[] = { | 31 | ctl_table core_table[] = { |
@@ -127,6 +128,14 @@ ctl_table core_table[] = { | |||
127 | .mode = 0644, | 128 | .mode = 0644, |
128 | .proc_handler = &proc_dointvec | 129 | .proc_handler = &proc_dointvec |
129 | }, | 130 | }, |
131 | { | ||
132 | .ctl_name = CTL_UNNUMBERED, | ||
133 | .procname = "xfrm_acq_expires", | ||
134 | .data = &sysctl_xfrm_acq_expires, | ||
135 | .maxlen = sizeof(int), | ||
136 | .mode = 0644, | ||
137 | .proc_handler = &proc_dointvec | ||
138 | }, | ||
130 | #endif /* CONFIG_XFRM */ | 139 | #endif /* CONFIG_XFRM */ |
131 | #endif /* CONFIG_NET */ | 140 | #endif /* CONFIG_NET */ |
132 | { | 141 | { |
diff --git a/net/core/utils.c b/net/core/utils.c index adecfd281ae9..2030bb8c2d30 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -139,16 +139,16 @@ int in4_pton(const char *src, int srclen, | |||
139 | while(1) { | 139 | while(1) { |
140 | int c; | 140 | int c; |
141 | c = xdigit2bin(srclen > 0 ? *s : '\0', delim); | 141 | c = xdigit2bin(srclen > 0 ? *s : '\0', delim); |
142 | if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM))) { | 142 | if (!(c & (IN6PTON_DIGIT | IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK))) { |
143 | goto out; | 143 | goto out; |
144 | } | 144 | } |
145 | if (c & (IN6PTON_DOT | IN6PTON_DELIM)) { | 145 | if (c & (IN6PTON_DOT | IN6PTON_DELIM | IN6PTON_COLON_MASK)) { |
146 | if (w == 0) | 146 | if (w == 0) |
147 | goto out; | 147 | goto out; |
148 | *d++ = w & 0xff; | 148 | *d++ = w & 0xff; |
149 | w = 0; | 149 | w = 0; |
150 | i++; | 150 | i++; |
151 | if (c & IN6PTON_DELIM) { | 151 | if (c & (IN6PTON_DELIM | IN6PTON_COLON_MASK)) { |
152 | if (i != 4) | 152 | if (i != 4) |
153 | goto out; | 153 | goto out; |
154 | break; | 154 | break; |
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 837f2957fa83..9ad1f6252a97 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -250,8 +250,6 @@ e_inval: | |||
250 | return -EINVAL; | 250 | return -EINVAL; |
251 | } | 251 | } |
252 | 252 | ||
253 | #ifndef CONFIG_IP_NOSIOCRT | ||
254 | |||
255 | static inline __be32 sk_extract_addr(struct sockaddr *addr) | 253 | static inline __be32 sk_extract_addr(struct sockaddr *addr) |
256 | { | 254 | { |
257 | return ((struct sockaddr_in *) addr)->sin_addr.s_addr; | 255 | return ((struct sockaddr_in *) addr)->sin_addr.s_addr; |
@@ -443,15 +441,6 @@ int ip_rt_ioctl(unsigned int cmd, void __user *arg) | |||
443 | return -EINVAL; | 441 | return -EINVAL; |
444 | } | 442 | } |
445 | 443 | ||
446 | #else | ||
447 | |||
448 | int ip_rt_ioctl(unsigned int cmd, void *arg) | ||
449 | { | ||
450 | return -EINVAL; | ||
451 | } | ||
452 | |||
453 | #endif | ||
454 | |||
455 | struct nla_policy rtm_ipv4_policy[RTA_MAX+1] __read_mostly = { | 444 | struct nla_policy rtm_ipv4_policy[RTA_MAX+1] __read_mostly = { |
456 | [RTA_DST] = { .type = NLA_U32 }, | 445 | [RTA_DST] = { .type = NLA_U32 }, |
457 | [RTA_SRC] = { .type = NLA_U32 }, | 446 | [RTA_SRC] = { .type = NLA_U32 }, |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index bd4c295f5d79..766314505c09 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1674,9 +1674,8 @@ adjudge_to_death: | |||
1674 | } | 1674 | } |
1675 | if (sk->sk_state != TCP_CLOSE) { | 1675 | if (sk->sk_state != TCP_CLOSE) { |
1676 | sk_stream_mem_reclaim(sk); | 1676 | sk_stream_mem_reclaim(sk); |
1677 | if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans || | 1677 | if (tcp_too_many_orphans(sk, |
1678 | (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | 1678 | atomic_read(sk->sk_prot->orphan_count))) { |
1679 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { | ||
1680 | if (net_ratelimit()) | 1679 | if (net_ratelimit()) |
1681 | printk(KERN_INFO "TCP: too many of orphaned " | 1680 | printk(KERN_INFO "TCP: too many of orphaned " |
1682 | "sockets\n"); | 1681 | "sockets\n"); |
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c index 3938d5dbdf20..760165a0800c 100644 --- a/net/ipv4/tcp_probe.c +++ b/net/ipv4/tcp_probe.c | |||
@@ -80,7 +80,8 @@ static void printl(const char *fmt, ...) | |||
80 | 80 | ||
81 | kfifo_put(tcpw.fifo, tbuf, len); | 81 | kfifo_put(tcpw.fifo, tbuf, len); |
82 | wake_up(&tcpw.wait); | 82 | wake_up(&tcpw.wait); |
83 | } | 83 | } __attribute__ ((format (printf, 1, 2))); |
84 | |||
84 | 85 | ||
85 | /* | 86 | /* |
86 | * Hook inserted to be called before each receive packet. | 87 | * Hook inserted to be called before each receive packet. |
@@ -95,7 +96,7 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb, | |||
95 | /* Only update if port matches */ | 96 | /* Only update if port matches */ |
96 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) | 97 | if ((port == 0 || ntohs(inet->dport) == port || ntohs(inet->sport) == port) |
97 | && (full || tp->snd_cwnd != tcpw.lastcwnd)) { | 98 | && (full || tp->snd_cwnd != tcpw.lastcwnd)) { |
98 | printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u\n", | 99 | printl("%d.%d.%d.%d:%u %d.%d.%d.%d:%u %d %#x %#x %u %u %u %u\n", |
99 | NIPQUAD(inet->saddr), ntohs(inet->sport), | 100 | NIPQUAD(inet->saddr), ntohs(inet->sport), |
100 | NIPQUAD(inet->daddr), ntohs(inet->dport), | 101 | NIPQUAD(inet->daddr), ntohs(inet->dport), |
101 | skb->len, tp->snd_nxt, tp->snd_una, | 102 | skb->len, tp->snd_nxt, tp->snd_una, |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 2ca97b20929d..e61340150ba6 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -78,9 +78,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset) | |||
78 | if (sk->sk_err_soft) | 78 | if (sk->sk_err_soft) |
79 | orphans <<= 1; | 79 | orphans <<= 1; |
80 | 80 | ||
81 | if (orphans >= sysctl_tcp_max_orphans || | 81 | if (tcp_too_many_orphans(sk, orphans)) { |
82 | (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | ||
83 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) { | ||
84 | if (net_ratelimit()) | 82 | if (net_ratelimit()) |
85 | printk(KERN_INFO "Out of socket memory\n"); | 83 | printk(KERN_INFO "Out of socket memory\n"); |
86 | 84 | ||
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 5ceca951d73f..fa1902dc81b8 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -139,10 +139,8 @@ int xfrm4_rcv_encap(struct sk_buff *skb, __u16 encap_type) | |||
139 | nf_reset(skb); | 139 | nf_reset(skb); |
140 | 140 | ||
141 | if (decaps) { | 141 | if (decaps) { |
142 | if (!(skb->dev->flags&IFF_LOOPBACK)) { | 142 | dst_release(skb->dst); |
143 | dst_release(skb->dst); | 143 | skb->dst = NULL; |
144 | skb->dst = NULL; | ||
145 | } | ||
146 | netif_rx(skb); | 144 | netif_rx(skb); |
147 | return 0; | 145 | return 0; |
148 | } else { | 146 | } else { |
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index a2f2e6a5ec5d..9963700e74c1 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c | |||
@@ -85,6 +85,8 @@ static int xfrm4_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | |||
85 | top_iph->saddr = x->props.saddr.a4; | 85 | top_iph->saddr = x->props.saddr.a4; |
86 | top_iph->daddr = x->id.daddr.a4; | 86 | top_iph->daddr = x->id.daddr.a4; |
87 | 87 | ||
88 | skb->protocol = htons(ETH_P_IP); | ||
89 | |||
88 | memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); | 90 | memset(&(IPCB(skb)->opt), 0, sizeof(struct ip_options)); |
89 | return 0; | 91 | return 0; |
90 | } | 92 | } |
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c index b696c8401200..128f94c79c64 100644 --- a/net/ipv6/ah6.c +++ b/net/ipv6/ah6.c | |||
@@ -247,7 +247,7 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb) | |||
247 | memcpy(tmp_base, top_iph, sizeof(tmp_base)); | 247 | memcpy(tmp_base, top_iph, sizeof(tmp_base)); |
248 | 248 | ||
249 | tmp_ext = NULL; | 249 | tmp_ext = NULL; |
250 | extlen = skb_transport_offset(skb) + sizeof(struct ipv6hdr); | 250 | extlen = skb_transport_offset(skb) - sizeof(struct ipv6hdr); |
251 | if (extlen) { | 251 | if (extlen) { |
252 | extlen += sizeof(*tmp_ext); | 252 | extlen += sizeof(*tmp_ext); |
253 | tmp_ext = kmalloc(extlen, GFP_ATOMIC); | 253 | tmp_ext = kmalloc(extlen, GFP_ATOMIC); |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index ca08ee88d07f..662a7d9681fd 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -619,14 +619,6 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
619 | 619 | ||
620 | ins = &fn->leaf; | 620 | ins = &fn->leaf; |
621 | 621 | ||
622 | if (fn->fn_flags&RTN_TL_ROOT && | ||
623 | fn->leaf == &ip6_null_entry && | ||
624 | !(rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ){ | ||
625 | fn->leaf = rt; | ||
626 | rt->u.dst.rt6_next = NULL; | ||
627 | goto out; | ||
628 | } | ||
629 | |||
630 | for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { | 622 | for (iter = fn->leaf; iter; iter=iter->u.dst.rt6_next) { |
631 | /* | 623 | /* |
632 | * Search for duplicates | 624 | * Search for duplicates |
@@ -666,7 +658,6 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt, | |||
666 | * insert node | 658 | * insert node |
667 | */ | 659 | */ |
668 | 660 | ||
669 | out: | ||
670 | rt->u.dst.rt6_next = iter; | 661 | rt->u.dst.rt6_next = iter; |
671 | *ins = rt; | 662 | *ins = rt; |
672 | rt->rt6i_node = fn; | 663 | rt->rt6i_node = fn; |
diff --git a/net/ipv6/xfrm6_input.c b/net/ipv6/xfrm6_input.c index d7ed8aa56ec1..c858537cec4b 100644 --- a/net/ipv6/xfrm6_input.c +++ b/net/ipv6/xfrm6_input.c | |||
@@ -104,10 +104,8 @@ int xfrm6_rcv_spi(struct sk_buff *skb, __be32 spi) | |||
104 | nf_reset(skb); | 104 | nf_reset(skb); |
105 | 105 | ||
106 | if (decaps) { | 106 | if (decaps) { |
107 | if (!(skb->dev->flags&IFF_LOOPBACK)) { | 107 | dst_release(skb->dst); |
108 | dst_release(skb->dst); | 108 | skb->dst = NULL; |
109 | skb->dst = NULL; | ||
110 | } | ||
111 | netif_rx(skb); | 109 | netif_rx(skb); |
112 | return -1; | 110 | return -1; |
113 | } else { | 111 | } else { |
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c index a6c0cdf46ad6..9fc95bc6509f 100644 --- a/net/ipv6/xfrm6_mode_tunnel.c +++ b/net/ipv6/xfrm6_mode_tunnel.c | |||
@@ -80,6 +80,7 @@ static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | |||
80 | top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT); | 80 | top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT); |
81 | ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); | 81 | ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr); |
82 | ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); | 82 | ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr); |
83 | skb->protocol = htons(ETH_P_IPV6); | ||
83 | return 0; | 84 | return 0; |
84 | } | 85 | } |
85 | 86 | ||
diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index 6e36df67f8d5..4e84f24fd439 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c | |||
@@ -2474,6 +2474,8 @@ static int ieee80211_open(struct net_device *dev) | |||
2474 | if (sdata->type == IEEE80211_IF_TYPE_STA && | 2474 | if (sdata->type == IEEE80211_IF_TYPE_STA && |
2475 | !local->user_space_mlme) | 2475 | !local->user_space_mlme) |
2476 | netif_carrier_off(dev); | 2476 | netif_carrier_off(dev); |
2477 | else | ||
2478 | netif_carrier_on(dev); | ||
2477 | 2479 | ||
2478 | netif_start_queue(dev); | 2480 | netif_start_queue(dev); |
2479 | return 0; | 2481 | return 0; |
@@ -3278,8 +3280,10 @@ ieee80211_rx_h_defragment(struct ieee80211_txrx_data *rx) | |||
3278 | return TXRX_DROP; | 3280 | return TXRX_DROP; |
3279 | } | 3281 | } |
3280 | } | 3282 | } |
3281 | while ((skb = __skb_dequeue(&entry->skb_list))) | 3283 | while ((skb = __skb_dequeue(&entry->skb_list))) { |
3282 | memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); | 3284 | memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len); |
3285 | dev_kfree_skb(skb); | ||
3286 | } | ||
3283 | 3287 | ||
3284 | /* Complete frame has been reassembled - process it now */ | 3288 | /* Complete frame has been reassembled - process it now */ |
3285 | rx->fragmented = 1; | 3289 | rx->fragmented = 1; |
diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 3e07e9d6fa42..9f30ae4c2ab3 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c | |||
@@ -1155,6 +1155,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct net_device *dev, | |||
1155 | if (status_code != WLAN_STATUS_SUCCESS) { | 1155 | if (status_code != WLAN_STATUS_SUCCESS) { |
1156 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", | 1156 | printk(KERN_DEBUG "%s: AP denied association (code=%d)\n", |
1157 | dev->name, status_code); | 1157 | dev->name, status_code); |
1158 | if (status_code == WLAN_STATUS_REASSOC_NO_ASSOC) | ||
1159 | ifsta->prev_bssid_set = 0; | ||
1158 | return; | 1160 | return; |
1159 | } | 1161 | } |
1160 | 1162 | ||
@@ -2995,7 +2997,7 @@ struct sta_info * ieee80211_ibss_add_sta(struct net_device *dev, | |||
2995 | { | 2997 | { |
2996 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); | 2998 | struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); |
2997 | struct sta_info *sta; | 2999 | struct sta_info *sta; |
2998 | struct ieee80211_sub_if_data *sdata = NULL; | 3000 | struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); |
2999 | 3001 | ||
3000 | /* TODO: Could consider removing the least recently used entry and | 3002 | /* TODO: Could consider removing the least recently used entry and |
3001 | * allow new one to be added. */ | 3003 | * allow new one to be added. */ |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 02e401cd683f..f8b83014ccca 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -83,22 +83,6 @@ | |||
83 | #include <net/inet_common.h> | 83 | #include <net/inet_common.h> |
84 | #endif | 84 | #endif |
85 | 85 | ||
86 | #define CONFIG_SOCK_PACKET 1 | ||
87 | |||
88 | /* | ||
89 | Proposed replacement for SIOC{ADD,DEL}MULTI and | ||
90 | IFF_PROMISC, IFF_ALLMULTI flags. | ||
91 | |||
92 | It is more expensive, but I believe, | ||
93 | it is really correct solution: reentereble, safe and fault tolerant. | ||
94 | |||
95 | IFF_PROMISC/IFF_ALLMULTI/SIOC{ADD/DEL}MULTI are faked by keeping | ||
96 | reference count and global flag, so that real status is | ||
97 | (gflag|(count != 0)), so that we can use obsolete faulty interface | ||
98 | not harming clever users. | ||
99 | */ | ||
100 | #define CONFIG_PACKET_MULTICAST 1 | ||
101 | |||
102 | /* | 86 | /* |
103 | Assumptions: | 87 | Assumptions: |
104 | - if device has no dev->hard_header routine, it adds and removes ll header | 88 | - if device has no dev->hard_header routine, it adds and removes ll header |
@@ -159,7 +143,6 @@ static atomic_t packet_socks_nr; | |||
159 | 143 | ||
160 | /* Private packet socket structures. */ | 144 | /* Private packet socket structures. */ |
161 | 145 | ||
162 | #ifdef CONFIG_PACKET_MULTICAST | ||
163 | struct packet_mclist | 146 | struct packet_mclist |
164 | { | 147 | { |
165 | struct packet_mclist *next; | 148 | struct packet_mclist *next; |
@@ -179,7 +162,7 @@ struct packet_mreq_max | |||
179 | unsigned short mr_alen; | 162 | unsigned short mr_alen; |
180 | unsigned char mr_address[MAX_ADDR_LEN]; | 163 | unsigned char mr_address[MAX_ADDR_LEN]; |
181 | }; | 164 | }; |
182 | #endif | 165 | |
183 | #ifdef CONFIG_PACKET_MMAP | 166 | #ifdef CONFIG_PACKET_MMAP |
184 | static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing); | 167 | static int packet_set_ring(struct sock *sk, struct tpacket_req *req, int closing); |
185 | #endif | 168 | #endif |
@@ -205,9 +188,7 @@ struct packet_sock { | |||
205 | origdev:1; | 188 | origdev:1; |
206 | int ifindex; /* bound device */ | 189 | int ifindex; /* bound device */ |
207 | __be16 num; | 190 | __be16 num; |
208 | #ifdef CONFIG_PACKET_MULTICAST | ||
209 | struct packet_mclist *mclist; | 191 | struct packet_mclist *mclist; |
210 | #endif | ||
211 | #ifdef CONFIG_PACKET_MMAP | 192 | #ifdef CONFIG_PACKET_MMAP |
212 | atomic_t mapped; | 193 | atomic_t mapped; |
213 | unsigned int pg_vec_order; | 194 | unsigned int pg_vec_order; |
@@ -263,7 +244,6 @@ static void packet_sock_destruct(struct sock *sk) | |||
263 | 244 | ||
264 | static const struct proto_ops packet_ops; | 245 | static const struct proto_ops packet_ops; |
265 | 246 | ||
266 | #ifdef CONFIG_SOCK_PACKET | ||
267 | static const struct proto_ops packet_ops_spkt; | 247 | static const struct proto_ops packet_ops_spkt; |
268 | 248 | ||
269 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) | 249 | static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) |
@@ -435,7 +415,6 @@ out_unlock: | |||
435 | dev_put(dev); | 415 | dev_put(dev); |
436 | return err; | 416 | return err; |
437 | } | 417 | } |
438 | #endif | ||
439 | 418 | ||
440 | static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, | 419 | static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk, |
441 | unsigned int res) | 420 | unsigned int res) |
@@ -851,9 +830,7 @@ static int packet_release(struct socket *sock) | |||
851 | __sock_put(sk); | 830 | __sock_put(sk); |
852 | } | 831 | } |
853 | 832 | ||
854 | #ifdef CONFIG_PACKET_MULTICAST | ||
855 | packet_flush_mclist(sk); | 833 | packet_flush_mclist(sk); |
856 | #endif | ||
857 | 834 | ||
858 | #ifdef CONFIG_PACKET_MMAP | 835 | #ifdef CONFIG_PACKET_MMAP |
859 | if (po->pg_vec) { | 836 | if (po->pg_vec) { |
@@ -936,8 +913,6 @@ out_unlock: | |||
936 | * Bind a packet socket to a device | 913 | * Bind a packet socket to a device |
937 | */ | 914 | */ |
938 | 915 | ||
939 | #ifdef CONFIG_SOCK_PACKET | ||
940 | |||
941 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 916 | static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
942 | { | 917 | { |
943 | struct sock *sk=sock->sk; | 918 | struct sock *sk=sock->sk; |
@@ -960,7 +935,6 @@ static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr, int add | |||
960 | } | 935 | } |
961 | return err; | 936 | return err; |
962 | } | 937 | } |
963 | #endif | ||
964 | 938 | ||
965 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | 939 | static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
966 | { | 940 | { |
@@ -1012,11 +986,8 @@ static int packet_create(struct socket *sock, int protocol) | |||
1012 | 986 | ||
1013 | if (!capable(CAP_NET_RAW)) | 987 | if (!capable(CAP_NET_RAW)) |
1014 | return -EPERM; | 988 | return -EPERM; |
1015 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW | 989 | if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW && |
1016 | #ifdef CONFIG_SOCK_PACKET | 990 | sock->type != SOCK_PACKET) |
1017 | && sock->type != SOCK_PACKET | ||
1018 | #endif | ||
1019 | ) | ||
1020 | return -ESOCKTNOSUPPORT; | 991 | return -ESOCKTNOSUPPORT; |
1021 | 992 | ||
1022 | sock->state = SS_UNCONNECTED; | 993 | sock->state = SS_UNCONNECTED; |
@@ -1027,10 +998,9 @@ static int packet_create(struct socket *sock, int protocol) | |||
1027 | goto out; | 998 | goto out; |
1028 | 999 | ||
1029 | sock->ops = &packet_ops; | 1000 | sock->ops = &packet_ops; |
1030 | #ifdef CONFIG_SOCK_PACKET | ||
1031 | if (sock->type == SOCK_PACKET) | 1001 | if (sock->type == SOCK_PACKET) |
1032 | sock->ops = &packet_ops_spkt; | 1002 | sock->ops = &packet_ops_spkt; |
1033 | #endif | 1003 | |
1034 | sock_init_data(sock, sk); | 1004 | sock_init_data(sock, sk); |
1035 | 1005 | ||
1036 | po = pkt_sk(sk); | 1006 | po = pkt_sk(sk); |
@@ -1046,10 +1016,10 @@ static int packet_create(struct socket *sock, int protocol) | |||
1046 | 1016 | ||
1047 | spin_lock_init(&po->bind_lock); | 1017 | spin_lock_init(&po->bind_lock); |
1048 | po->prot_hook.func = packet_rcv; | 1018 | po->prot_hook.func = packet_rcv; |
1049 | #ifdef CONFIG_SOCK_PACKET | 1019 | |
1050 | if (sock->type == SOCK_PACKET) | 1020 | if (sock->type == SOCK_PACKET) |
1051 | po->prot_hook.func = packet_rcv_spkt; | 1021 | po->prot_hook.func = packet_rcv_spkt; |
1052 | #endif | 1022 | |
1053 | po->prot_hook.af_packet_priv = sk; | 1023 | po->prot_hook.af_packet_priv = sk; |
1054 | 1024 | ||
1055 | if (proto) { | 1025 | if (proto) { |
@@ -1169,7 +1139,6 @@ out: | |||
1169 | return err; | 1139 | return err; |
1170 | } | 1140 | } |
1171 | 1141 | ||
1172 | #ifdef CONFIG_SOCK_PACKET | ||
1173 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | 1142 | static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, |
1174 | int *uaddr_len, int peer) | 1143 | int *uaddr_len, int peer) |
1175 | { | 1144 | { |
@@ -1190,7 +1159,6 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, | |||
1190 | 1159 | ||
1191 | return 0; | 1160 | return 0; |
1192 | } | 1161 | } |
1193 | #endif | ||
1194 | 1162 | ||
1195 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | 1163 | static int packet_getname(struct socket *sock, struct sockaddr *uaddr, |
1196 | int *uaddr_len, int peer) | 1164 | int *uaddr_len, int peer) |
@@ -1221,7 +1189,6 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, | |||
1221 | return 0; | 1189 | return 0; |
1222 | } | 1190 | } |
1223 | 1191 | ||
1224 | #ifdef CONFIG_PACKET_MULTICAST | ||
1225 | static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) | 1192 | static void packet_dev_mc(struct net_device *dev, struct packet_mclist *i, int what) |
1226 | { | 1193 | { |
1227 | switch (i->type) { | 1194 | switch (i->type) { |
@@ -1349,7 +1316,6 @@ static void packet_flush_mclist(struct sock *sk) | |||
1349 | } | 1316 | } |
1350 | rtnl_unlock(); | 1317 | rtnl_unlock(); |
1351 | } | 1318 | } |
1352 | #endif | ||
1353 | 1319 | ||
1354 | static int | 1320 | static int |
1355 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) | 1321 | packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen) |
@@ -1362,7 +1328,6 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1362 | return -ENOPROTOOPT; | 1328 | return -ENOPROTOOPT; |
1363 | 1329 | ||
1364 | switch(optname) { | 1330 | switch(optname) { |
1365 | #ifdef CONFIG_PACKET_MULTICAST | ||
1366 | case PACKET_ADD_MEMBERSHIP: | 1331 | case PACKET_ADD_MEMBERSHIP: |
1367 | case PACKET_DROP_MEMBERSHIP: | 1332 | case PACKET_DROP_MEMBERSHIP: |
1368 | { | 1333 | { |
@@ -1383,7 +1348,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv | |||
1383 | ret = packet_mc_drop(sk, &mreq); | 1348 | ret = packet_mc_drop(sk, &mreq); |
1384 | return ret; | 1349 | return ret; |
1385 | } | 1350 | } |
1386 | #endif | 1351 | |
1387 | #ifdef CONFIG_PACKET_MMAP | 1352 | #ifdef CONFIG_PACKET_MMAP |
1388 | case PACKET_RX_RING: | 1353 | case PACKET_RX_RING: |
1389 | { | 1354 | { |
@@ -1506,11 +1471,10 @@ static int packet_notifier(struct notifier_block *this, unsigned long msg, void | |||
1506 | 1471 | ||
1507 | switch (msg) { | 1472 | switch (msg) { |
1508 | case NETDEV_UNREGISTER: | 1473 | case NETDEV_UNREGISTER: |
1509 | #ifdef CONFIG_PACKET_MULTICAST | ||
1510 | if (po->mclist) | 1474 | if (po->mclist) |
1511 | packet_dev_mclist(dev, po->mclist, -1); | 1475 | packet_dev_mclist(dev, po->mclist, -1); |
1512 | // fallthrough | 1476 | /* fallthrough */ |
1513 | #endif | 1477 | |
1514 | case NETDEV_DOWN: | 1478 | case NETDEV_DOWN: |
1515 | if (dev->ifindex == po->ifindex) { | 1479 | if (dev->ifindex == po->ifindex) { |
1516 | spin_lock(&po->bind_lock); | 1480 | spin_lock(&po->bind_lock); |
@@ -1856,7 +1820,6 @@ out: | |||
1856 | #endif | 1820 | #endif |
1857 | 1821 | ||
1858 | 1822 | ||
1859 | #ifdef CONFIG_SOCK_PACKET | ||
1860 | static const struct proto_ops packet_ops_spkt = { | 1823 | static const struct proto_ops packet_ops_spkt = { |
1861 | .family = PF_PACKET, | 1824 | .family = PF_PACKET, |
1862 | .owner = THIS_MODULE, | 1825 | .owner = THIS_MODULE, |
@@ -1877,7 +1840,6 @@ static const struct proto_ops packet_ops_spkt = { | |||
1877 | .mmap = sock_no_mmap, | 1840 | .mmap = sock_no_mmap, |
1878 | .sendpage = sock_no_sendpage, | 1841 | .sendpage = sock_no_sendpage, |
1879 | }; | 1842 | }; |
1880 | #endif | ||
1881 | 1843 | ||
1882 | static const struct proto_ops packet_ops = { | 1844 | static const struct proto_ops packet_ops = { |
1883 | .family = PF_PACKET, | 1845 | .family = PF_PACKET, |
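Taken together, the af_packet.c hunks delete the local CONFIG_SOCK_PACKET and CONFIG_PACKET_MULTICAST pseudo-options (both were unconditionally #defined at the top of the file anyway) and drop every #ifdef that depended on them, so SOCK_PACKET sockets and the PACKET_ADD_MEMBERSHIP / PACKET_DROP_MEMBERSHIP options are now always compiled in. A small user-space sketch of the membership path that is now unconditional (hypothetical helper; standard headers assumed):

        #include <string.h>
        #include <sys/socket.h>
        #include <linux/if_packet.h>

        /* Join promiscuous mode on one interface via the packet multicast-list API. */
        static int join_promisc(int fd, int ifindex)
        {
                struct packet_mreq mreq;

                memset(&mreq, 0, sizeof(mreq));
                mreq.mr_ifindex = ifindex;
                mreq.mr_type = PACKET_MR_PROMISC;
                return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
                                  &mreq, sizeof(mreq));
        }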
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index b8bab89616a0..64a375178c5f 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -26,10 +26,11 @@ | |||
26 | #include <net/xfrm.h> | 26 | #include <net/xfrm.h> |
27 | #include <net/ip.h> | 27 | #include <net/ip.h> |
28 | #include <linux/audit.h> | 28 | #include <linux/audit.h> |
29 | #include <linux/cache.h> | ||
29 | 30 | ||
30 | #include "xfrm_hash.h" | 31 | #include "xfrm_hash.h" |
31 | 32 | ||
32 | int sysctl_xfrm_larval_drop; | 33 | int sysctl_xfrm_larval_drop __read_mostly; |
33 | 34 | ||
34 | DEFINE_MUTEX(xfrm_cfg_mutex); | 35 | DEFINE_MUTEX(xfrm_cfg_mutex); |
35 | EXPORT_SYMBOL(xfrm_cfg_mutex); | 36 | EXPORT_SYMBOL(xfrm_cfg_mutex); |
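The xfrm_policy.c change is purely a data-placement tweak: pulling in <linux/cache.h> and tagging sysctl_xfrm_larval_drop as __read_mostly groups the flag with other rarely-written data, keeping it off cache lines that see frequent writes. On architectures that provide the section, the annotation expands roughly as below (simplified sketch of the <linux/cache.h> definition; other architectures define it as empty):

        #define __read_mostly __attribute__((__section__(".data.read_mostly")))

        int sysctl_xfrm_larval_drop __read_mostly;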
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 9955ff4da0a2..372f06eb8bb7 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -21,18 +21,21 @@ | |||
21 | #include <linux/cache.h> | 21 | #include <linux/cache.h> |
22 | #include <asm/uaccess.h> | 22 | #include <asm/uaccess.h> |
23 | #include <linux/audit.h> | 23 | #include <linux/audit.h> |
24 | #include <linux/cache.h> | ||
24 | 25 | ||
25 | #include "xfrm_hash.h" | 26 | #include "xfrm_hash.h" |
26 | 27 | ||
27 | struct sock *xfrm_nl; | 28 | struct sock *xfrm_nl; |
28 | EXPORT_SYMBOL(xfrm_nl); | 29 | EXPORT_SYMBOL(xfrm_nl); |
29 | 30 | ||
30 | u32 sysctl_xfrm_aevent_etime = XFRM_AE_ETIME; | 31 | u32 sysctl_xfrm_aevent_etime __read_mostly = XFRM_AE_ETIME; |
31 | EXPORT_SYMBOL(sysctl_xfrm_aevent_etime); | 32 | EXPORT_SYMBOL(sysctl_xfrm_aevent_etime); |
32 | 33 | ||
33 | u32 sysctl_xfrm_aevent_rseqth = XFRM_AE_SEQT_SIZE; | 34 | u32 sysctl_xfrm_aevent_rseqth __read_mostly = XFRM_AE_SEQT_SIZE; |
34 | EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth); | 35 | EXPORT_SYMBOL(sysctl_xfrm_aevent_rseqth); |
35 | 36 | ||
37 | u32 sysctl_xfrm_acq_expires __read_mostly = 30; | ||
38 | |||
36 | /* Each xfrm_state may be linked to two tables: | 39 | /* Each xfrm_state may be linked to two tables: |
37 | 40 | ||
38 | 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl) | 41 | 1. Hash table by (spi,daddr,ah/esp) to find SA by SPI. (input,ctl) |
@@ -622,8 +625,8 @@ xfrm_state_find(xfrm_address_t *daddr, xfrm_address_t *saddr, | |||
622 | h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family); | 625 | h = xfrm_spi_hash(&x->id.daddr, x->id.spi, x->id.proto, family); |
623 | hlist_add_head(&x->byspi, xfrm_state_byspi+h); | 626 | hlist_add_head(&x->byspi, xfrm_state_byspi+h); |
624 | } | 627 | } |
625 | x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES; | 628 | x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires; |
626 | x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ; | 629 | x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; |
627 | add_timer(&x->timer); | 630 | add_timer(&x->timer); |
628 | xfrm_state_num++; | 631 | xfrm_state_num++; |
629 | xfrm_hash_grow_check(x->bydst.next != NULL); | 632 | xfrm_hash_grow_check(x->bydst.next != NULL); |
@@ -772,9 +775,9 @@ static struct xfrm_state *__find_acq_core(unsigned short family, u8 mode, u32 re | |||
772 | x->props.family = family; | 775 | x->props.family = family; |
773 | x->props.mode = mode; | 776 | x->props.mode = mode; |
774 | x->props.reqid = reqid; | 777 | x->props.reqid = reqid; |
775 | x->lft.hard_add_expires_seconds = XFRM_ACQ_EXPIRES; | 778 | x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires; |
776 | xfrm_state_hold(x); | 779 | xfrm_state_hold(x); |
777 | x->timer.expires = jiffies + XFRM_ACQ_EXPIRES*HZ; | 780 | x->timer.expires = jiffies + sysctl_xfrm_acq_expires*HZ; |
778 | add_timer(&x->timer); | 781 | add_timer(&x->timer); |
779 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); | 782 | hlist_add_head(&x->bydst, xfrm_state_bydst+h); |
780 | h = xfrm_src_hash(daddr, saddr, family); | 783 | h = xfrm_src_hash(daddr, saddr, family); |
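The xfrm_state.c hunks turn the acquire-state lifetime into a tunable: the new sysctl_xfrm_acq_expires (default 30 seconds, presumably preserving the old XFRM_ACQ_EXPIRES value) replaces the compile-time constant both when a larval state is created in xfrm_state_find() and in __find_acq_core(). The consumption pattern, using only calls already visible above:

        /* Hard add-expiry in seconds, plus a backing timer in jiffies. */
        x->lft.hard_add_expires_seconds = sysctl_xfrm_acq_expires;
        x->timer.expires = jiffies + sysctl_xfrm_acq_expires * HZ;
        add_timer(&x->timer);

The table entry that actually exposes the variable via sysctl is not part of the hunks shown here.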