-rw-r--r--  MAINTAINERS                     |  28
-rw-r--r--  arch/i386/kernel/apic.c         |   2
-rw-r--r--  arch/m68k/kernel/dma.c          |   4
-rw-r--r--  arch/m68k/kernel/m68k_ksyms.c   |   4
-rw-r--r--  arch/m68k/lib/string.c          |  15
-rw-r--r--  arch/m68k/lib/uaccess.c         |   2
-rw-r--r--  arch/um/kernel/irq.c            |   2
-rw-r--r--  drivers/ide/mips/swarm.c        |   2
-rw-r--r--  drivers/md/md.c                 |   1
-rw-r--r--  drivers/mmc/mmc_block.c         | 104
-rw-r--r--  drivers/usb/host/ohci-q.c       |   2
-rw-r--r--  fs/nfsd/nfssvc.c                |   2
-rw-r--r--  include/asm-generic/bug.h       |  16
-rw-r--r--  include/asm-generic/percpu.h    |   2
-rw-r--r--  include/asm-m68k/dma-mapping.h  |   2
-rw-r--r--  include/asm-m68k/string.h       | 198
-rw-r--r--  include/asm-m68k/system.h       |   6
-rw-r--r--  include/asm-m68k/user.h         |   2
-rw-r--r--  include/asm-s390/percpu.h       |   4
-rw-r--r--  include/asm-um/irq_regs.h       |   1
-rw-r--r--  include/asm-x86_64/percpu.h     |   6
-rw-r--r--  include/linux/mm.h              |   1
-rw-r--r--  include/linux/mmc/protocol.h    |   1
-rw-r--r--  include/linux/percpu.h          |   2
-rw-r--r--  include/linux/sched.h           |   7
-rw-r--r--  include/linux/sunrpc/svc.h      |   3
-rw-r--r--  include/linux/timex.h           |   3
-rw-r--r--  kernel/audit.c                  |   3
-rw-r--r--  lib/irq_regs.c                  |   2
-rw-r--r--  mm/memory.c                     |   9
-rw-r--r--  mm/slab.c                       |  34
-rw-r--r--  net/sunrpc/svc.c                |  17
-rw-r--r--  net/sunrpc/svcsock.c            |  28
33 files changed, 318 insertions(+), 197 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 84a018ee4556..50fb3ae5f313 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2261,6 +2261,17 @@ T: git kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6.git
 T:	cvs cvs.parisc-linux.org:/var/cvs/linux-2.6
 S:	Maintained
 
+PC87360 HARDWARE MONITORING DRIVER
+P:	Jim Cromie
+M:	jim.cromie@gmail.com
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+
+PC8736x GPIO DRIVER
+P:	Jim Cromie
+M:	jim.cromie@gmail.com
+S:	Maintained
+
 PCI ERROR RECOVERY
 P:	Linas Vepstas
 M:	linas@austin.ibm.com
@@ -2592,10 +2603,19 @@ L: lksctp-developers@lists.sourceforge.net
 S:	Supported
 
 SCx200 CPU SUPPORT
-P:	Christer Weinigel
-M:	christer@weinigel.se
-W:	http://www.weinigel.se
-S:	Supported
+P:	Jim Cromie
+M:	jim.cromie@gmail.com
+S:	Odd Fixes
+
+SCx200 GPIO DRIVER
+P:	Jim Cromie
+M:	jim.cromie@gmail.com
+S:	Maintained
+
+SCx200 HRT CLOCKSOURCE DRIVER
+P:	Jim Cromie
+M:	jim.cromie@gmail.com
+S:	Maintained
 
 SECURITY CONTACT
 P:	Security Officers
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index 7d500da0e63b..2fd4b7d927c2 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -1197,7 +1197,7 @@ inline void smp_local_timer_interrupt(void)
 {
 	profile_tick(CPU_PROFILING);
 #ifdef CONFIG_SMP
-	update_process_times(user_mode_vm(irq_regs));
+	update_process_times(user_mode_vm(get_irq_regs()));
 #endif
 
 	/*
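
The call above relies on the new per-CPU irq_regs accessors instead of an explicit pt_regs argument. A rough sketch of the generic accessors involved (per asm-generic/irq_regs.h; shown only for orientation, not part of this patch):

	DEFINE_PER_CPU(struct pt_regs *, __irq_regs);

	static inline struct pt_regs *get_irq_regs(void)
	{
		return __get_cpu_var(__irq_regs);	/* regs saved at interrupt entry */
	}

	static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
	{
		struct pt_regs *old_regs = get_irq_regs();

		__get_cpu_var(__irq_regs) = new_regs;	/* entry code restores old_regs on exit */
		return old_regs;
	}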
diff --git a/arch/m68k/kernel/dma.c b/arch/m68k/kernel/dma.c
index fc449f8b2045..9d4e4b5b6bd8 100644
--- a/arch/m68k/kernel/dma.c
+++ b/arch/m68k/kernel/dma.c
@@ -15,7 +15,7 @@
 #include <asm/scatterlist.h>
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *handle, int flag)
+			 dma_addr_t *handle, gfp_t flag)
 {
 	struct page *page, **map;
 	pgprot_t pgprot;
@@ -51,7 +51,7 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
 	else
 		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
-	addr = vmap(map, size, flag, pgprot);
+	addr = vmap(map, size, VM_MAP, pgprot);
 	kfree(map);
 
 	return addr;
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index aff26a52167c..f9636e84e6a4 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -1,7 +1,6 @@
 #include <linux/module.h>
 #include <linux/linkage.h>
 #include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/user.h>
 #include <linux/elfcore.h>
@@ -53,9 +52,6 @@ EXPORT_SYMBOL(mach_beep);
 #endif
 EXPORT_SYMBOL(dump_fpu);
 EXPORT_SYMBOL(dump_thread);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
 EXPORT_SYMBOL(kernel_thread);
 #ifdef CONFIG_VME
 EXPORT_SYMBOL(vme_brdtype);
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index b92b89e1ea0c..891e1347bc4e 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -1,6 +1,19 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#define __IN_STRING_C
 
-#include <linux/types.h>
 #include <linux/module.h>
+#include <linux/string.h>
+
+char *strcpy(char *dest, const char *src)
+{
+	return __kernel_strcpy(dest, src);
+}
+EXPORT_SYMBOL(strcpy);
 
 void *memset(void *s, int c, size_t count)
 {
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 1bc188c0d983..865f9fb9e686 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -84,7 +84,7 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
84 " .even\n" 84 " .even\n"
85 "20: lsl.l #2,%0\n" 85 "20: lsl.l #2,%0\n"
86 "50: add.l %5,%0\n" 86 "50: add.l %5,%0\n"
87 " jra 7b\n" 87 " jra 8b\n"
88 " .previous\n" 88 " .previous\n"
89 "\n" 89 "\n"
90 " .section __ex_table,\"a\"\n" 90 " .section __ex_table,\"a\"\n"
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index eee97bb81ba5..7c41dabe7a2c 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -356,7 +356,7 @@ void forward_interrupts(int pid)
 unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
 {
 	irq_enter();
-	__do_IRQ(irq, (struct pt_regs *)regs);
+	__do_IRQ(irq);
 	irq_exit();
 	return 1;
 }
diff --git a/drivers/ide/mips/swarm.c b/drivers/ide/mips/swarm.c
index 66f6064f4640..09c9e7936b0d 100644
--- a/drivers/ide/mips/swarm.c
+++ b/drivers/ide/mips/swarm.c
@@ -4,6 +4,7 @@
  * Author: Manish Lachwani, mlachwani@mvista.com
  * Copyright (C) 2004 MIPS Technologies, Inc. All rights reserved.
  * Author: Maciej W. Rozycki <macro@mips.com>
+ * Copyright (c) 2006 Maciej W. Rozycki
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
@@ -127,6 +128,7 @@ static int __devinit swarm_ide_probe(struct device *dev)
 	memcpy(hwif->io_ports, hwif->hw.io_ports, sizeof(hwif->io_ports));
 	hwif->irq = hwif->hw.irq;
 
+	probe_hwif_init(hwif);
 	dev_set_drvdata(dev, hwif);
 
 	return 0;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index cb8281605be8..57fa64f93e5f 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -3849,6 +3849,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
 	}
 	clear_bit(In_sync, &rdev->flags);
 	rdev->desc_nr = -1;
+	rdev->saved_raid_disk = -1;
 	err = bind_rdev_to_array(rdev, mddev);
 	if (err)
 		goto abort_export;
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c
index c1293f1bda87..f9027c8db792 100644
--- a/drivers/mmc/mmc_block.c
+++ b/drivers/mmc/mmc_block.c
@@ -28,6 +28,7 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/scatterlist.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -154,6 +155,71 @@ static int mmc_blk_prep_rq(struct mmc_queue *mq, struct request *req)
 	return stat;
 }
 
+static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
+{
+	int err;
+	u32 blocks;
+
+	struct mmc_request mrq;
+	struct mmc_command cmd;
+	struct mmc_data data;
+	unsigned int timeout_us;
+
+	struct scatterlist sg;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = MMC_APP_CMD;
+	cmd.arg = card->rca << 16;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+	err = mmc_wait_for_cmd(card->host, &cmd, 0);
+	if ((err != MMC_ERR_NONE) || !(cmd.resp[0] & R1_APP_CMD))
+		return (u32)-1;
+
+	memset(&cmd, 0, sizeof(struct mmc_command));
+
+	cmd.opcode = SD_APP_SEND_NUM_WR_BLKS;
+	cmd.arg = 0;
+	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+	memset(&data, 0, sizeof(struct mmc_data));
+
+	data.timeout_ns = card->csd.tacc_ns * 100;
+	data.timeout_clks = card->csd.tacc_clks * 100;
+
+	timeout_us = data.timeout_ns / 1000;
+	timeout_us += data.timeout_clks * 1000 /
+		(card->host->ios.clock / 1000);
+
+	if (timeout_us > 100000) {
+		data.timeout_ns = 100000000;
+		data.timeout_clks = 0;
+	}
+
+	data.blksz = 4;
+	data.blocks = 1;
+	data.flags = MMC_DATA_READ;
+	data.sg = &sg;
+	data.sg_len = 1;
+
+	memset(&mrq, 0, sizeof(struct mmc_request));
+
+	mrq.cmd = &cmd;
+	mrq.data = &data;
+
+	sg_init_one(&sg, &blocks, 4);
+
+	mmc_wait_for_req(card->host, &mrq);
+
+	if (cmd.error != MMC_ERR_NONE || data.error != MMC_ERR_NONE)
+		return (u32)-1;
+
+	blocks = ntohl(blocks);
+
+	return blocks;
+}
+
 static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -184,10 +250,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 
 	/*
 	 * If the host doesn't support multiple block writes, force
-	 * block writes to single block.
+	 * block writes to single block. SD cards are excepted from
+	 * this rule as they support querying the number of
+	 * successfully written sectors.
 	 */
 	if (rq_data_dir(req) != READ &&
-	    !(card->host->caps & MMC_CAP_MULTIWRITE))
+	    !(card->host->caps & MMC_CAP_MULTIWRITE) &&
+	    !mmc_card_sd(card))
 		brq.data.blocks = 1;
 
 	if (brq.data.blocks > 1) {
@@ -276,24 +345,41 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
 		return 1;
 
  cmd_err:
-	mmc_card_release_host(card);
-
 	ret = 1;
 
 	/*
-	 * For writes and where the host claims to support proper
-	 * error reporting, we first ok the successful blocks.
+	 * If this is an SD card and we're writing, we can first
+	 * mark the known good sectors as ok.
+	 *
+	 * If the card is not SD, we can still ok written sectors
+	 * if the controller can do proper error reporting.
 	 *
 	 * For reads we just fail the entire chunk as that should
 	 * be safe in all cases.
 	 */
-	if (rq_data_dir(req) != READ &&
-	    (card->host->caps & MMC_CAP_MULTIWRITE)) {
+	if (rq_data_dir(req) != READ && mmc_card_sd(card)) {
+		u32 blocks;
+		unsigned int bytes;
+
+		blocks = mmc_sd_num_wr_blocks(card);
+		if (blocks != (u32)-1) {
+			if (card->csd.write_partial)
+				bytes = blocks << md->block_bits;
+			else
+				bytes = blocks << 9;
+			spin_lock_irq(&md->lock);
+			ret = end_that_request_chunk(req, 1, bytes);
+			spin_unlock_irq(&md->lock);
+		}
+	} else if (rq_data_dir(req) != READ &&
+		   (card->host->caps & MMC_CAP_MULTIWRITE)) {
 		spin_lock_irq(&md->lock);
 		ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
 		spin_unlock_irq(&md->lock);
 	}
 
+	mmc_card_release_host(card);
+
 	spin_lock_irq(&md->lock);
 	while (ret) {
 		ret = end_that_request_chunk(req, 0,
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index e08d1a2664e6..fe1fe2f97cb5 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -925,7 +925,7 @@ rescan_all:
 	/* only take off EDs that the HC isn't using, accounting for
 	 * frame counter wraps and EDs with partially retired TDs
 	 */
-	if (likely (get_irq_regs() && HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+	if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
 		if (tick_before (tick, ed->tick)) {
 skip_ed:
 			last = &ed->ed_next;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 6fa6340a5fb8..013b38996e64 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -217,7 +217,7 @@ int nfsd_create_serv(void)
 
 	atomic_set(&nfsd_busy, 0);
 	nfsd_serv = svc_create_pooled(&nfsd_program,
-				      NFSD_BUFSIZE - NFSSVC_MAXBLKSIZE + nfsd_max_blksize,
+				      nfsd_max_blksize,
 				      nfsd_last_thread,
 				      nfsd, SIG_NOCLEAN, THIS_MODULE);
 	if (nfsd_serv == NULL)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index a5250895155e..1d9573cf4a0b 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -41,14 +41,14 @@
 #endif
 #endif
 
 #define WARN_ON_ONCE(condition) ({			\
-	static int __warn_once = 1;			\
-	typeof(condition) __ret_warn_once = (condition);\
+	static int __warned;				\
+	typeof(condition) __ret_warn_once = (condition);	\
 							\
-	if (likely(__warn_once))			\
-		if (WARN_ON(__ret_warn_once))		\
-			__warn_once = 0;		\
+	if (unlikely(__ret_warn_once))			\
+		if (WARN_ON(!__warned))			\
+			__warned = 1;			\
 	unlikely(__ret_warn_once);			\
 })
 
 #ifdef CONFIG_SMP
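
With the rewrite, the warning fires only the first time the condition is actually true, while the macro keeps returning the condition on every call. A small usage sketch (names here are illustrative, not from this patch):

	static int queue_item(int len)
	{
		/* one stack trace the first time len overflows, silence afterwards */
		if (WARN_ON_ONCE(len > 4096))
			return -EINVAL;		/* the overflow is still reported on every call */
		return 0;
	}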
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 6d45ee5472af..196376262240 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -15,7 +15,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset[cpu]); }))
 #define __get_cpu_var(var) per_cpu(var, smp_processor_id())
 #define __raw_get_cpu_var(var) per_cpu(var, raw_smp_processor_id())
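
The spelling fix is cosmetic; the otherwise unused extern declaration only exists so that per_cpu() refuses to compile unless its first argument is a plain identifier. Roughly (variable names hypothetical):

	DEFINE_PER_CPU(int, hitcount);

	per_cpu(hitcount, cpu)++;	/* ok: declares simple_identifier_hitcount() and moves on */

	/* per_cpu(table[3], cpu) would not compile: the paste produces
	 * "extern int simple_identifier_table[3](void);", which is a syntax error. */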
diff --git a/include/asm-m68k/dma-mapping.h b/include/asm-m68k/dma-mapping.h
index cebbb03370ec..c1299c3beb50 100644
--- a/include/asm-m68k/dma-mapping.h
+++ b/include/asm-m68k/dma-mapping.h
@@ -26,7 +26,7 @@ static inline int dma_is_consistent(dma_addr_t dma_addr)
 }
 
 extern void *dma_alloc_coherent(struct device *, size_t,
-				dma_addr_t *, int);
+				dma_addr_t *, gfp_t);
 extern void dma_free_coherent(struct device *, size_t,
 			      void *, dma_addr_t);
 
diff --git a/include/asm-m68k/string.h b/include/asm-m68k/string.h
index 6c59215b285e..2eb7df1e0f5d 100644
--- a/include/asm-m68k/string.h
+++ b/include/asm-m68k/string.h
@@ -1,138 +1,114 @@
 #ifndef _M68K_STRING_H_
 #define _M68K_STRING_H_
 
-#include <asm/setup.h>
-#include <asm/page.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
 
-#define __HAVE_ARCH_STRCPY
-static inline char * strcpy(char * dest,const char *src)
+static inline size_t __kernel_strlen(const char *s)
 {
-	char *xdest = dest;
-
-	__asm__ __volatile__
-	("1:\tmoveb %1@+,%0@+\n\t"
-	 "jne 1b"
-	 : "=a" (dest), "=a" (src)
-	 : "0" (dest), "1" (src) : "memory");
-	return xdest;
-}
+	const char *sc;
 
-#define __HAVE_ARCH_STRNCPY
-static inline char * strncpy(char *dest, const char *src, size_t n)
-{
-	char *xdest = dest;
-
-	if (n == 0)
-		return xdest;
-
-	__asm__ __volatile__
-	("1:\tmoveb %1@+,%0@+\n\t"
-	 "jeq 2f\n\t"
-	 "subql #1,%2\n\t"
-	 "jne 1b\n\t"
-	 "2:"
-	 : "=a" (dest), "=a" (src), "=d" (n)
-	 : "0" (dest), "1" (src), "2" (n)
-	 : "memory");
-	return xdest;
+	for (sc = s; *sc++; )
+		;
+	return sc - s - 1;
 }
 
-#define __HAVE_ARCH_STRCAT
-static inline char * strcat(char * dest, const char * src)
+static inline char *__kernel_strcpy(char *dest, const char *src)
 {
-	char *tmp = dest;
+	char *xdest = dest;
 
-	while (*dest)
-		dest++;
-	while ((*dest++ = *src++))
-		;
-
-	return tmp;
+	asm volatile ("\n"
+		"1:	move.b	(%1)+,(%0)+\n"
+		"	jne	1b"
+		: "+a" (dest), "+a" (src)
+		: : "memory");
+	return xdest;
 }
 
-#define __HAVE_ARCH_STRNCAT
-static inline char * strncat(char *dest, const char *src, size_t count)
-{
-	char *tmp = dest;
-
-	if (count) {
-		while (*dest)
-			dest++;
-		while ((*dest++ = *src++)) {
-			if (--count == 0) {
-				*dest++='\0';
-				break;
-			}
-		}
-	}
+#ifndef __IN_STRING_C
 
-	return tmp;
-}
+#define __HAVE_ARCH_STRLEN
+#define strlen(s)	(__builtin_constant_p(s) ?	\
+			 __builtin_strlen(s) :		\
+			 __kernel_strlen(s))
 
-#define __HAVE_ARCH_STRCHR
-static inline char * strchr(const char * s, int c)
+#define __HAVE_ARCH_STRNLEN
+static inline size_t strnlen(const char *s, size_t count)
 {
-	const char ch = c;
+	const char *sc = s;
 
-	for(; *s != ch; ++s)
-		if (*s == '\0')
-			return( NULL );
-	return( (char *) s);
+	asm volatile ("\n"
+		"1:	subq.l	#1,%1\n"
+		"	jcs	2f\n"
+		"	tst.b	(%0)+\n"
+		"	jne	1b\n"
+		"	subq.l	#1,%0\n"
+		"2:"
+		: "+a" (sc), "+d" (count));
+	return sc - s;
 }
 
-/* strstr !! */
+#define __HAVE_ARCH_STRCPY
+#if __GNUC__ >= 4
+#define strcpy(d, s)	(__builtin_constant_p(s) &&	\
+			 __builtin_strlen(s) <= 32 ?	\
+			 __builtin_strcpy(d, s) :	\
+			 __kernel_strcpy(d, s))
+#else
+#define strcpy(d, s)	__kernel_strcpy(d, s)
+#endif
 
-#define __HAVE_ARCH_STRLEN
-static inline size_t strlen(const char * s)
+#define __HAVE_ARCH_STRNCPY
+static inline char *strncpy(char *dest, const char *src, size_t n)
 {
-	const char *sc;
-	for (sc = s; *sc != '\0'; ++sc) ;
-	return(sc - s);
+	char *xdest = dest;
+
+	asm volatile ("\n"
+		"	jra	2f\n"
+		"1:	move.b	(%1),(%0)+\n"
+		"	jeq	2f\n"
+		"	addq.l	#1,%1\n"
+		"2:	subq.l	#1,%2\n"
+		"	jcc	1b\n"
+		: "+a" (dest), "+a" (src), "+d" (n)
+		: : "memory");
+	return xdest;
 }
 
-/* strnlen !! */
+#define __HAVE_ARCH_STRCAT
+#define strcat(d, s)	({			\
+	char *__d = (d);			\
+	strcpy(__d + strlen(__d), (s));		\
+})
 
-#define __HAVE_ARCH_STRCMP
-static inline int strcmp(const char * cs,const char * ct)
+#define __HAVE_ARCH_STRCHR
+static inline char *strchr(const char *s, int c)
 {
-	char __res;
+	char sc, ch = c;
 
-	__asm__
-	("1:\tmoveb %0@+,%2\n\t" /* get *cs */
-	 "cmpb %1@+,%2\n\t"      /* compare a byte */
-	 "jne 2f\n\t"            /* not equal, break out */
-	 "tstb %2\n\t"           /* at end of cs? */
-	 "jne 1b\n\t"            /* no, keep going */
-	 "jra 3f\n\t"            /* strings are equal */
-	 "2:\tsubb %1@-,%2\n\t"  /* *cs - *ct */
-	 "3:"
-	 : "=a" (cs), "=a" (ct), "=d" (__res)
-	 : "0" (cs), "1" (ct));
-	return __res;
+	for (; (sc = *s++) != ch; ) {
+		if (!sc)
+			return NULL;
+	}
+	return (char *)s - 1;
 }
 
-#define __HAVE_ARCH_STRNCMP
-static inline int strncmp(const char * cs,const char * ct,size_t count)
+#define __HAVE_ARCH_STRCMP
+static inline int strcmp(const char *cs, const char *ct)
 {
-	char __res;
+	char res;
 
-	if (!count)
-		return 0;
-	__asm__
-	("1:\tmovb %0@+,%3\n\t"  /* get *cs */
-	 "cmpb %1@+,%3\n\t"      /* compare a byte */
-	 "jne 3f\n\t"            /* not equal, break out */
-	 "tstb %3\n\t"           /* at end of cs? */
-	 "jeq 4f\n\t"            /* yes, all done */
-	 "subql #1,%2\n\t"       /* no, adjust count */
-	 "jne 1b\n\t"            /* more to do, keep going */
-	 "2:\tmoveq #0,%3\n\t"   /* strings are equal */
-	 "jra 4f\n\t"
-	 "3:\tsubb %1@-,%3\n\t"  /* *cs - *ct */
-	 "4:"
-	 : "=a" (cs), "=a" (ct), "=d" (count), "=d" (__res)
-	 : "0" (cs), "1" (ct), "2" (count));
-	return __res;
+	asm ("\n"
+		"1:	move.b	(%0)+,%2\n"	/* get *cs */
+		"	cmp.b	(%1)+,%2\n"	/* compare a byte */
+		"	jne	2f\n"		/* not equal, break out */
+		"	tst.b	%2\n"		/* at end of cs? */
+		"	jne	1b\n"		/* no, keep going */
+		"	jra	3f\n"		/* strings are equal */
+		"2:	sub.b	-(%1),%2\n"	/* *cs - *ct */
+		"3:"
+		: "+a" (cs), "+a" (ct), "=d" (res));
+	return res;
 }
 
 #define __HAVE_ARCH_MEMSET
@@ -150,4 +126,6 @@ extern void *memmove(void *, const void *, __kernel_size_t);
 extern int memcmp(const void *, const void *, __kernel_size_t);
 #define memcmp(d, s, n) __builtin_memcmp(d, s, n)
 
+#endif
+
 #endif /* _M68K_STRING_H_ */
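
Net effect of the new header: constant-string cases are folded by the GCC builtins while everything else drops into the out-of-line __kernel_* helpers, and string.c picks up the real strcpy() via __IN_STRING_C. A hedged illustration of how call sites resolve (buffers invented for the example):

	char buf[16], other[16] = "abc";

	size_t n = strlen("m68k");	/* constant: folded at compile time via __builtin_strlen */
	size_t m = strlen(other);	/* not constant: calls __kernel_strlen() */

	strcpy(buf, "hi");		/* short literal, gcc >= 4: __builtin_strcpy */
	strcpy(buf, other);		/* anything else: __kernel_strcpy */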
diff --git a/include/asm-m68k/system.h b/include/asm-m68k/system.h
index 131a0cb0f491..243dd13e6bfc 100644
--- a/include/asm-m68k/system.h
+++ b/include/asm-m68k/system.h
@@ -78,13 +78,13 @@ static inline int irqs_disabled(void)
 #define mb()		barrier()
 #define rmb()		barrier()
 #define wmb()		barrier()
-#define read_barrier_depends()	do { } while(0)
-#define set_mb(var, value)	do { xchg(&var, value); } while (0)
+#define read_barrier_depends()	((void)0)
+#define set_mb(var, value)	({ (var) = (value); wmb(); })
 
 #define smp_mb()	barrier()
 #define smp_rmb()	barrier()
 #define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while(0)
+#define smp_read_barrier_depends()	((void)0)
 
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
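
The new set_mb() spells out the intent, a plain store followed by a write barrier, instead of going through a full xchg() read-modify-write. A rough sketch of what a caller now gets (flag is an illustrative variable):

	/* set_mb(flag, 1) now expands to approximately: */
	flag = 1;
	wmb();		/* on m68k this is barrier(), i.e. a compiler-only ordering point */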
diff --git a/include/asm-m68k/user.h b/include/asm-m68k/user.h
index e8d5a64c7e79..d7c0b109bd45 100644
--- a/include/asm-m68k/user.h
+++ b/include/asm-m68k/user.h
@@ -81,7 +81,7 @@ struct user{
 	unsigned long magic;		/* To uniquely identify a core file */
 	char u_comm[32];		/* User command that was responsible */
 };
-#define NBPG PAGE_SIZE
+#define NBPG 4096
 #define UPAGES 1
 #define HOST_TEXT_START_ADDR (u.start_code)
 #define HOST_STACK_END_ADDR (u.start_stack + u.u_ssize * NBPG)
diff --git a/include/asm-s390/percpu.h b/include/asm-s390/percpu.h
index 495ad99c7635..9ea7f1023e57 100644
--- a/include/asm-s390/percpu.h
+++ b/include/asm-s390/percpu.h
@@ -16,7 +16,7 @@
 #if defined(__s390x__) && defined(MODULE)
 
 #define __reloc_hide(var,offset) (*({			\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	unsigned long *__ptr;				\
 	asm ( "larl %0,per_cpu__"#var"@GOTENT"		\
 	: "=a" (__ptr) : "X" (per_cpu__##var) );	\
@@ -25,7 +25,7 @@
 #else
 
 #define __reloc_hide(var, offset) (*({			\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	unsigned long __ptr;				\
 	asm ( "" : "=a" (__ptr) : "0" (&per_cpu__##var) );	\
 	(typeof(&per_cpu__##var)) (__ptr + (offset)); }))
diff --git a/include/asm-um/irq_regs.h b/include/asm-um/irq_regs.h
new file mode 100644
index 000000000000..3dd9c0b70270
--- /dev/null
+++ b/include/asm-um/irq_regs.h
@@ -0,0 +1 @@
+#include <asm-generic/irq_regs.h>
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 285756010c51..5ed0ef340842 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -32,13 +32,13 @@
 
 /* var is in discarded region: offset to particular copy we want */
 #define per_cpu(var, cpu) (*({				\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	RELOC_HIDE(&per_cpu__##var, __per_cpu_offset(cpu)); }))
 #define __get_cpu_var(var) (*({				\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
 #define __raw_get_cpu_var(var) (*({			\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	RELOC_HIDE(&per_cpu__##var, __my_cpu_offset()); }))
 
 /* A macro to avoid #include hell... */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b7966ab8cb6a..26146623be2f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -593,6 +593,7 @@ static inline int page_mapped(struct page *page)
  */
 #define NOPAGE_SIGBUS	(NULL)
 #define NOPAGE_OOM	((struct page *) (-1))
+#define NOPAGE_REFAULT	((struct page *) (-2))	/* Return to userspace, rerun */
 
 /*
  * Error return values for the *_nopfn functions
diff --git a/include/linux/mmc/protocol.h b/include/linux/mmc/protocol.h
index 81c3f77f652c..08dec8d9e703 100644
--- a/include/linux/mmc/protocol.h
+++ b/include/linux/mmc/protocol.h
@@ -83,6 +83,7 @@
 
   /* Application commands */
 #define SD_APP_SET_BUS_WIDTH      6   /* ac   [1:0] bus width    R1  */
+#define SD_APP_SEND_NUM_WR_BLKS  22   /* adtc                    R1  */
 #define SD_APP_OP_COND           41   /* bcr  [31:0] OCR         R3  */
 #define SD_APP_SEND_SCR          51   /* adtc                    R1  */
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 46ec72fa2c84..600e3d387ffc 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -19,7 +19,7 @@
  * we force a syntax error here if it isn't.
  */
 #define get_cpu_var(var) (*({				\
-	extern int simple_indentifier_##var(void);	\
+	extern int simple_identifier_##var(void);	\
 	preempt_disable();				\
 	&__get_cpu_var(var); }))
 #define put_cpu_var(var) preempt_enable()
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 331f4502e92b..6735c1cf334c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1065,9 +1065,10 @@ static inline int pid_alive(struct task_struct *p)
 }
 
 /**
- * is_init - check if a task structure is the first user space
- * task the kernel created.
- * @p: Task structure to be checked.
+ * is_init - check if a task structure is init
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
  */
 static inline int is_init(struct task_struct *tsk)
 {
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index d6288e89fd9d..9c9a8ad92477 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -57,7 +57,8 @@ struct svc_serv {
 	struct svc_stat *	sv_stats;	/* RPC statistics */
 	spinlock_t		sv_lock;
 	unsigned int		sv_nrthreads;	/* # of server threads */
-	unsigned int		sv_bufsz;	/* datagram buffer size */
+	unsigned int		sv_max_payload;	/* datagram payload size */
+	unsigned int		sv_max_mesg;	/* max_payload + 1 page for overheads */
 	unsigned int		sv_xdrsize;	/* XDR buffer size */
 
 	struct list_head	sv_permsocks;	/* all permanent sockets */
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 049dfe4a11f2..db501dc23c29 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -293,6 +293,9 @@ extern void second_overflow(void);
 extern void update_ntp_one_tick(void);
 extern int do_adjtimex(struct timex *);
 
+/* Don't use! Compatibility define for existing users. */
+#define tickadj	(500/HZ ? : 1)
+
 #endif /* KERNEL */
 
 #endif /* LINUX_TIMEX_H */
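
The compatibility define leans on the GNU "a ?: b" shorthand for "a ? a : b", so tickadj never evaluates to 0 even when the integer division truncates. Worked out for a few common HZ values (illustrative arithmetic only):

	500/HZ ? : 1  ==  (500/HZ) ? (500/HZ) : 1

	HZ = 100   ->  500/100  = 5  ->  tickadj = 5
	HZ = 250   ->  500/250  = 2  ->  tickadj = 2
	HZ = 1000  ->  500/1000 = 0  ->  tickadj = 1   (fallback kicks in)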
diff --git a/kernel/audit.c b/kernel/audit.c
index f9889ee77825..98106f6078b0 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -340,7 +340,7 @@ static int kauditd_thread(void *dummy)
 {
 	struct sk_buff *skb;
 
-	while (1) {
+	while (!kthread_should_stop()) {
 		skb = skb_dequeue(&audit_skb_queue);
 		wake_up(&audit_backlog_wait);
 		if (skb) {
@@ -369,6 +369,7 @@ static int kauditd_thread(void *dummy)
 			remove_wait_queue(&kauditd_wait, &wait);
 		}
 	}
+	return 0;
 }
 
 int audit_send_list(void *_dest)
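
The loop condition follows the usual kthread contract: a thread started via the kthread API should poll kthread_should_stop() and return, so that kthread_stop() can collect it. A minimal sketch of that pattern (thread function name invented):

	static int worker_thread(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();		/* sleep until woken or asked to stop */
			/* ... drain the pending work here ... */
		}
		return 0;			/* value handed back to kthread_stop() */
	}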
diff --git a/lib/irq_regs.c b/lib/irq_regs.c
index 101b1a4f9b14..753880a5440c 100644
--- a/lib/irq_regs.c
+++ b/lib/irq_regs.c
@@ -8,8 +8,10 @@
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#include <linux/module.h>
 #include <asm/irq_regs.h>
 
 #ifndef ARCH_HAS_OWN_IRQ_REGS
 DEFINE_PER_CPU(struct pt_regs *, __irq_regs);
+EXPORT_PER_CPU_SYMBOL(__irq_regs);
 #endif
diff --git a/mm/memory.c b/mm/memory.c
index 9cf3f341a28a..b5a4aadd961a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1086,6 +1086,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		default:
 			BUG();
 		}
+		cond_resched();
 	}
 	if (pages) {
 		pages[i] = page;
@@ -2169,11 +2170,13 @@ retry:
 	 * after the next truncate_count read.
 	 */
 
-	/* no page was available -- either SIGBUS or OOM */
-	if (new_page == NOPAGE_SIGBUS)
+	/* no page was available -- either SIGBUS, OOM or REFAULT */
+	if (unlikely(new_page == NOPAGE_SIGBUS))
 		return VM_FAULT_SIGBUS;
-	if (new_page == NOPAGE_OOM)
+	else if (unlikely(new_page == NOPAGE_OOM))
 		return VM_FAULT_OOM;
+	else if (unlikely(new_page == NOPAGE_REFAULT))
+		return VM_FAULT_MINOR;
 
 	/*
 	 * Should we do an early C-O-W break?
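
With the new constant, a ->nopage handler can ask for the fault to simply be retried instead of choosing between SIGBUS and OOM; do_no_page() above turns it into VM_FAULT_MINOR. A hedged sketch of a handler using it (driver helper and names invented for illustration):

	static struct page *mydrv_nopage(struct vm_area_struct *vma,
					 unsigned long address, int *type)
	{
		struct page *page = mydrv_lookup_page(vma, address);	/* hypothetical lookup */

		if (!page)
			return NOPAGE_REFAULT;	/* mapping in flux: have the caller fault again */
		get_page(page);
		if (type)
			*type = VM_FAULT_MINOR;
		return page;
	}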
diff --git a/mm/slab.c b/mm/slab.c
index e9a63b5a7fb9..64fb0d770b06 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1106,15 +1106,18 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 	int nodeid = slabp->nodeid;
 	struct kmem_list3 *l3;
 	struct array_cache *alien = NULL;
+	int node;
+
+	node = numa_node_id();
 
 	/*
 	 * Make sure we are not freeing a object from another node to the array
 	 * cache on this cpu.
 	 */
-	if (likely(slabp->nodeid == numa_node_id()))
+	if (likely(slabp->nodeid == node))
 		return 0;
 
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 	STATS_INC_NODEFREES(cachep);
 	if (l3->alien && l3->alien[nodeid]) {
 		alien = l3->alien[nodeid];
@@ -1352,6 +1355,7 @@ void __init kmem_cache_init(void)
 	struct cache_names *names;
 	int i;
 	int order;
+	int node;
 
 	for (i = 0; i < NUM_INIT_LISTS; i++) {
 		kmem_list3_init(&initkmem_list3[i]);
@@ -1386,12 +1390,14 @@ void __init kmem_cache_init(void)
 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
 	 */
 
+	node = numa_node_id();
+
 	/* 1) create the cache_cache */
 	INIT_LIST_HEAD(&cache_chain);
 	list_add(&cache_cache.next, &cache_chain);
 	cache_cache.colour_off = cache_line_size();
 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
-	cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
+	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];
 
 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
 					cache_line_size());
@@ -1496,19 +1502,18 @@ void __init kmem_cache_init(void)
 	}
 	/* 5) Replace the bootstrap kmem_list3's */
 	{
-		int node;
+		int nid;
+
 		/* Replace the static kmem_list3 structures for the boot cpu */
-		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
-			  numa_node_id());
+		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);
 
-		for_each_online_node(node) {
+		for_each_online_node(nid) {
 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
-				  &initkmem_list3[SIZE_AC + node], node);
+				  &initkmem_list3[SIZE_AC + nid], nid);
 
 			if (INDEX_AC != INDEX_L3) {
 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
-					  &initkmem_list3[SIZE_L3 + node],
-					  node);
+					  &initkmem_list3[SIZE_L3 + nid], nid);
 			}
 		}
 	}
@@ -2918,6 +2923,9 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
 	int batchcount;
 	struct kmem_list3 *l3;
 	struct array_cache *ac;
+	int node;
+
+	node = numa_node_id();
 
 	check_irq_off();
 	ac = cpu_cache_get(cachep);
@@ -2931,7 +2939,7 @@ retry:
 		 */
 		batchcount = BATCHREFILL_LIMIT;
 	}
-	l3 = cachep->nodelists[numa_node_id()];
+	l3 = cachep->nodelists[node];
 
 	BUG_ON(ac->avail > 0 || !l3);
 	spin_lock(&l3->list_lock);
@@ -2961,7 +2969,7 @@ retry:
 			STATS_SET_HIGH(cachep);
 
 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
-							    numa_node_id());
+							    node);
 		}
 		check_slabp(cachep, slabp);
 
@@ -2980,7 +2988,7 @@ alloc_done:
 
 	if (unlikely(!ac->avail)) {
 		int x;
-		x = cache_grow(cachep, flags, numa_node_id());
+		x = cache_grow(cachep, flags, node);
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index c2c8bb20d07f..2807fa0eab40 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -282,7 +282,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
 	serv->sv_program   = prog;
 	serv->sv_nrthreads = 1;
 	serv->sv_stats     = prog->pg_stats;
-	serv->sv_bufsz     = bufsize? bufsize : 4096;
+	if (bufsize > RPCSVC_MAXPAYLOAD)
+		bufsize = RPCSVC_MAXPAYLOAD;
+	serv->sv_max_payload = bufsize? bufsize : 4096;
+	serv->sv_max_mesg  = roundup(serv->sv_max_payload + PAGE_SIZE, PAGE_SIZE);
 	serv->sv_shutdown  = shutdown;
 	xdrsize = 0;
 	while (prog) {
@@ -414,9 +417,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
 	int pages;
 	int arghi;
 
-	if (size > RPCSVC_MAXPAYLOAD)
-		size = RPCSVC_MAXPAYLOAD;
-	pages = 2 + (size+ PAGE_SIZE -1) / PAGE_SIZE;
+	pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
+				       * We assume one is at most one page
+				       */
 	arghi = 0;
 	BUG_ON(pages > RPCSVC_MAXPAGES);
 	while (pages) {
@@ -463,7 +466,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
 
 	if (!(rqstp->rq_argp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
 	    || !(rqstp->rq_resp = kmalloc(serv->sv_xdrsize, GFP_KERNEL))
-	    || !svc_init_buffer(rqstp, serv->sv_bufsz))
+	    || !svc_init_buffer(rqstp, serv->sv_max_mesg))
 		goto out_thread;
 
 	serv->sv_nrthreads++;
@@ -938,8 +941,8 @@ u32 svc_max_payload(const struct svc_rqst *rqstp)
 
 	if (rqstp->rq_sock->sk_sock->type == SOCK_DGRAM)
 		max = RPCSVC_MAXPAYLOAD_UDP;
-	if (rqstp->rq_server->sv_bufsz < max)
-		max = rqstp->rq_server->sv_bufsz;
+	if (rqstp->rq_server->sv_max_payload < max)
+		max = rqstp->rq_server->sv_max_payload;
 	return max;
 }
 EXPORT_SYMBOL_GPL(svc_max_payload);
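
Splitting sv_bufsz into sv_max_payload and sv_max_mesg makes the buffer arithmetic explicit: the on-the-wire message buffer is the payload plus one page of RPC/record-marking overhead, rounded to a page. Worked through for a typical 32K payload with 4K pages (numbers illustrative only):

	sv_max_payload = 32768
	sv_max_mesg    = roundup(32768 + 4096, 4096)   = 36864	/* payload + 1 page */
	svc_init_buffer(): pages = 36864/4096 + 1      = 10	/* holds request and reply */
	svc_recv():        pages = (36864 + 4096)/4096 = 10	/* matches the rq_pages[] use below */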
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index b39e7e2b648f..61e307cca13d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -192,13 +192,13 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	svsk->sk_pool = pool;
 
 	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
-	if (((atomic_read(&svsk->sk_reserved) + serv->sv_bufsz)*2
+	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
 	     > svc_sock_wspace(svsk))
 	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
 	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
 		/* Don't enqueue while not enough space for reply */
 		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
-			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
+			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
 			svc_sock_wspace(svsk));
 		svsk->sk_pool = NULL;
 		clear_bit(SK_BUSY, &svsk->sk_flags);
@@ -220,7 +220,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 			rqstp, rqstp->rq_sock);
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 		BUG_ON(svsk->sk_pool != pool);
 		wake_up(&rqstp->rq_wait);
@@ -639,8 +639,8 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 	     * which will access the socket.
 	     */
 	    svc_sock_setbufsize(svsk->sk_sock,
-			(serv->sv_nrthreads+3) * serv->sv_bufsz,
-			(serv->sv_nrthreads+3) * serv->sv_bufsz);
+			(serv->sv_nrthreads+3) * serv->sv_max_mesg,
+			(serv->sv_nrthreads+3) * serv->sv_max_mesg);
 
 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
@@ -749,8 +749,8 @@ svc_udp_init(struct svc_sock *svsk)
 	 * svc_udp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);
 
 	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
@@ -993,8 +993,8 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	 * as soon a a complete request arrives.
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    (serv->sv_nrthreads+3) * serv->sv_bufsz,
-			    3 * serv->sv_bufsz);
+			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
+			    3 * serv->sv_max_mesg);
 
 	clear_bit(SK_DATA, &svsk->sk_flags);
 
@@ -1032,7 +1032,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		}
 		svsk->sk_reclen &= 0x7fffffff;
 		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
-		if (svsk->sk_reclen > serv->sv_bufsz) {
+		if (svsk->sk_reclen > serv->sv_max_mesg) {
 			printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx (large)\n",
 			       (unsigned long) svsk->sk_reclen);
 			goto err_delete;
@@ -1171,8 +1171,8 @@ svc_tcp_init(struct svc_sock *svsk)
 	 * svc_tcp_recvfrom will re-adjust if necessary
 	 */
 	svc_sock_setbufsize(svsk->sk_sock,
-			    3 * svsk->sk_server->sv_bufsz,
-			    3 * svsk->sk_server->sv_bufsz);
+			    3 * svsk->sk_server->sv_max_mesg,
+			    3 * svsk->sk_server->sv_max_mesg);
 
 	set_bit(SK_CHNGBUF, &svsk->sk_flags);
 	set_bit(SK_DATA, &svsk->sk_flags);
@@ -1234,7 +1234,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 
 
 	/* now allocate needed pages.  If we get a failure, sleep briefly */
-	pages = 2 + (serv->sv_bufsz + PAGE_SIZE -1) / PAGE_SIZE;
+	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
 	for (i=0; i < pages ; i++)
 		while (rqstp->rq_pages[i] == NULL) {
 			struct page *p = alloc_page(GFP_KERNEL);
@@ -1263,7 +1263,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
 		rqstp->rq_sock = svsk;
 		atomic_inc(&svsk->sk_inuse);
-		rqstp->rq_reserved = serv->sv_bufsz;
+		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
 	} else {
 		/* No data pending. Go to sleep */