aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/sysdev
diff options
context:
space:
mode:
authorJeff Layton <jlayton@redhat.com>2013-05-09 08:36:23 -0400
committerJ. Bruce Fields <bfields@redhat.com>2013-05-09 12:58:36 -0400
commit7255e716b1757dc10fa5e3a4d2eaab303ff9f7b6 (patch)
treef3e9feeab51f72102b21c40c5779e32767e2f468 /arch/powerpc/sysdev
parentfb43f11c666a4f99f23f0be4fa528dcd288c0da2 (diff)
nfsd: fix oops when legacy_recdir_name_error is passed a -ENOENT error
Toralf reported the following oops to the linux-nfs mailing list: -----------------[snip]------------------ NFSD: unable to generate recoverydir name (-2). NFSD: disabling legacy clientid tracking. Reboot recovery will not function correctly! BUG: unable to handle kernel NULL pointer dereference at 000003c8 IP: [<f90a3d91>] nfsd4_client_tracking_exit+0x11/0x50 [nfsd] *pdpt = 000000002ba33001 *pde = 0000000000000000 Oops: 0000 [#1] SMP Modules linked in: loop nfsd auth_rpcgss ipt_MASQUERADE xt_owner xt_multiport ipt_REJECT xt_tcpudp xt_recent xt_conntrack nf_conntrack_ftp xt_limit xt_LOG iptable_nat nf_conntrack_ipv4 nf_defrag_ipv4 nf_nat_ipv4 nf_nat nf_conntrack iptable_filter ip_tables x_tables af_packet pppoe pppox ppp_generic slhc bridge stp llc tun arc4 iwldvm mac80211 coretemp kvm_intel uvcvideo sdhci_pci sdhci mmc_core videobuf2_vmalloc videobuf2_memops usblp videobuf2_core i915 iwlwifi psmouse videodev cfg80211 kvm fbcon bitblit cfbfillrect acpi_cpufreq mperf evdev softcursor font cfbimgblt i2c_algo_bit cfbcopyarea intel_agp intel_gtt drm_kms_helper snd_hda_codec_conexant drm agpgart fb fbdev tpm_tis thinkpad_acpi tpm nvram e1000e rfkill thermal ptp wmi pps_core tpm_bios 8250_pci processor 8250 ac snd_hda_intel snd_hda_codec snd_pcm battery video i2c_i801 snd_page_alloc snd_timer button serial_core i2c_core snd soundcore thermal_sys hwmon aesni_intel ablk_helper cryp td lrw aes_i586 xts gf128mul cbc fuse nfs lockd sunrpc dm_crypt dm_mod hid_monterey hid_microsoft hid_logitech hid_ezkey hid_cypress hid_chicony hid_cherry hid_belkin hid_apple hid_a4tech hid_generic usbhid hid sr_mod cdrom sg [last unloaded: microcode] Pid: 6374, comm: nfsd Not tainted 3.9.1 #6 LENOVO 4180F65/4180F65 EIP: 0060:[<f90a3d91>] EFLAGS: 00010202 CPU: 0 EIP is at nfsd4_client_tracking_exit+0x11/0x50 [nfsd] EAX: 00000000 EBX: fffffffe ECX: 00000007 EDX: 00000007 ESI: eb9dcb00 EDI: eb2991c0 EBP: eb2bde38 ESP: eb2bde34 DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 CR0: 80050033 CR2: 
000003c8 CR3: 2ba80000 CR4: 000407f0 DR0: 00000000 DR1: 00000000 DR2: 00000000 DR3: 00000000 DR6: ffff0ff0 DR7: 00000400 Process nfsd (pid: 6374, ti=eb2bc000 task=eb2711c0 task.ti=eb2bc000) Stack: fffffffe eb2bde4c f90a3e0c f90a7754 fffffffe eb0a9c00 eb2bdea0 f90a41ed eb2991c0 1b270000 eb2991c0 eb2bde7c f9099ce9 eb2bde98 0129a020 eb29a020 eb2bdecc eb2991c0 eb2bdea8 f9099da5 00000000 eb9dcb00 00000001 67822f08 Call Trace: [<f90a3e0c>] legacy_recdir_name_error+0x3c/0x40 [nfsd] [<f90a41ed>] nfsd4_create_clid_dir+0x15d/0x1c0 [nfsd] [<f9099ce9>] ? nfsd4_lookup_stateid+0x99/0xd0 [nfsd] [<f9099da5>] ? nfs4_preprocess_seqid_op+0x85/0x100 [nfsd] [<f90a4287>] nfsd4_client_record_create+0x37/0x50 [nfsd] [<f909d6ce>] nfsd4_open_confirm+0xfe/0x130 [nfsd] [<f90980b1>] ? nfsd4_encode_operation+0x61/0x90 [nfsd] [<f909d5d0>] ? nfsd4_free_stateid+0xc0/0xc0 [nfsd] [<f908fd0b>] nfsd4_proc_compound+0x41b/0x530 [nfsd] [<f9081b7b>] nfsd_dispatch+0x8b/0x1a0 [nfsd] [<f857b85d>] svc_process+0x3dd/0x640 [sunrpc] [<f908165d>] nfsd+0xad/0x110 [nfsd] [<f90815b0>] ? nfsd_destroy+0x70/0x70 [nfsd] [<c1054824>] kthread+0x94/0xa0 [<c1486937>] ret_from_kernel_thread+0x1b/0x28 [<c1054790>] ? flush_kthread_work+0xd0/0xd0 Code: 86 b0 00 00 00 90 c5 0a f9 c7 04 24 70 76 0a f9 e8 74 a9 3d c8 eb ba 8d 76 00 55 89 e5 53 66 66 66 66 90 8b 15 68 c7 0a f9 85 d2 <8b> 88 c8 03 00 00 74 2c 3b 11 77 28 8b 5c 91 08 85 db 74 22 8b EIP: [<f90a3d91>] nfsd4_client_tracking_exit+0x11/0x50 [nfsd] SS:ESP 0068:eb2bde34 CR2: 00000000000003c8 ---[ end trace 09e54015d145c9c6 ]--- The problem appears to be a regression that was introduced in commit 9a9c6478 "nfsd: make NFSv4 recovery client tracking options per net". Prior to that commit, it was safe to pass a NULL net pointer to nfsd4_client_tracking_exit in the legacy recdir case, and legacy_recdir_name_error did so. After that comit, the net pointer must be valid. This patch just fixes legacy_recdir_name_error to pass in a valid net pointer to that function. 
Cc: <stable@vger.kernel.org> # v3.8+ Cc: Stanislav Kinsbursky <skinsbursky@parallels.com> Reported-and-tested-by: Toralf Förster <toralf.foerster@gmx.de> Signed-off-by: Jeff Layton <jlayton@redhat.com> Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'arch/powerpc/sysdev')
0 files changed, 0 insertions, 0 deletions
705e87c0c3c3
6606c3e0da53
1da177e4c3f4
0697212a411c

1da177e4c3f4
4b10e7d562c9
1da177e4c3f4
1ea0704e0da6
4b10e7d562c9







9532fec118d4





4b10e7d562c9







1ea0704e0da6
c1e6098b23bb



4b10e7d562c9
c1e6098b23bb
4b10e7d562c9

1ea0704e0da6
4b10e7d562c9

1ea0704e0da6
ce1744f4ed20
0697212a411c










7da4d641c58d
1da177e4c3f4

6606c3e0da53
705e87c0c3c3
7da4d641c58d
9532fec118d4
7da4d641c58d
1da177e4c3f4

4b10e7d562c9

7d12efaea7e7
4b10e7d562c9






7d12efaea7e7
4b10e7d562c9

1da177e4c3f4
4b10e7d562c9
1da177e4c3f4
7d12efaea7e7


1da177e4c3f4


7da4d641c58d
9532fec118d4
1da177e4c3f4



cd7548ab360c

e180377f1ae4
7d12efaea7e7

e86100b54cd4
cd7548ab360c
7da4d641c58d
cd7548ab360c

1da177e4c3f4

4b10e7d562c9
9532fec118d4
4b10e7d562c9
9532fec118d4






4b10e7d562c9
1da177e4c3f4
7da4d641c58d

1da177e4c3f4

7d12efaea7e7


1da177e4c3f4


7da4d641c58d
1da177e4c3f4





7da4d641c58d
4b10e7d562c9
1da177e4c3f4
7da4d641c58d

1da177e4c3f4

7da4d641c58d
c1e6098b23bb
4b10e7d562c9
1da177e4c3f4




7da4d641c58d
1da177e4c3f4



1da177e4c3f4



7da4d641c58d
4b10e7d562c9
1da177e4c3f4
7da4d641c58d
1233d5882107


7da4d641c58d





4b10e7d562c9
7da4d641c58d







4b10e7d562c9
7da4d641c58d


1da177e4c3f4

b6a2fea39318
1da177e4c3f4






1da177e4c3f4

c1e6098b23bb
1da177e4c3f4








5a6fe1259506

1da177e4c3f4

5a6fe1259506
cdfd4325c0d8
1da177e4c3f4
191c542442fd
1da177e4c3f4




1da177e4c3f4






























1c12c4cf9411


c1e6098b23bb
1ddd439ef987
c1e6098b23bb

d08b3851da41
7d12efaea7e7

7da4d641c58d
ab50b8ed8180

63bfd7384b11
1da177e4c3f4






6a6160a7b5c2

1da177e4c3f4
















b845f313d78e
1da177e4c3f4





b344e05c5854
1da177e4c3f4





097d59106a8e
1da177e4c3f4


097d59106a8e
1da177e4c3f4






7d12efaea7e7
1da177e4c3f4














7d12efaea7e7
1da177e4c3f4
7d12efaea7e7

1da177e4c3f4
7e2cff42cfac

1da177e4c3f4






























1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419





                                        
                                                                  




                                                       







                              

                          
                               
                          
                             




                           






                                                                        
                                                                             
                                                                        
                                                                              
 
                                          
                           
                        
                                

                                  
 
                                                       
                                   
            

                                          
                                    
                                             
 
                                                                      







                                                                         





                                                                         







                                                                          
 



                                                                            
                                                                    
                                                           

                                               
 

                                        
                                                                      
                                                                               










                                                                     
                                

                                                        
                                   
                                       
 
                                           
                     

 

                                                                                
                                                  






                                                                                
                                                  

              
 
                                  
 


                                                                        


                           
                                
                           



                                               

                                                          
                                                                    

                                                                         
                                        
                                         
                         

                                          

                                               
                                                                        
                                                                               
 






                                                                              
                                                                   
                                                  

                     

 


                                                                        


                           
                                





                                               
                                                                        
                                                               
                                                  

                     

 
                                                                        
                                                                        
                                                     




                                          
                                



                                          



                                               
                                                                        
                                                               
                                                  
 


                                                                     





                                                                                
                                                            







                                                                            
                                                                                                        


                                                          

 
   






                                                                         

                      
                                  








                                                                        

                                                                         

                                  
                                                                 
                                                                           
                                          
                                                                      




                                               






























                                                                             


                                                                      
                                         
                                                                            

                                      
 

                                                             
 

                                                              
                             






                                  

                                                            
















                                                                       
                                      





                                                                    
                                                                             





                                           
                                           


                         
                            






                                                    
                














                                                          
                                                                              
 

                                                                              
 

                                                                                     






























                                                                          
/*
 *  mm/mprotect.c
 *
 *  (C) Copyright 1994 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 *
 *  Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *  (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/mman.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/mempolicy.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/perf_event.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#ifndef pgprot_modify
/*
 * Fallback for architectures that do not supply their own pgprot_modify():
 * simply adopt the new protection bits wholesale, ignoring the old ones.
 */
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	return newprot;
}
#endif

/*
 * Walk every pte mapped by @pmd in [addr, end) and update its protection.
 *
 * Normal mode (!prot_numa): present ptes are rewritten with @newprot; when
 * @dirty_accountable is set, dirty ptes are additionally made writable so
 * they do not take a spurious write fault later.
 *
 * NUMA-hinting mode (prot_numa): present ptes backed by a normal page are
 * marked with pte_mknuma() — but only non-shared pages (mapcount == 1) that
 * are not already pte_numa.  The walk also records whether all examined
 * pages sat on one node, reported through *ret_all_same_node so the caller
 * can batch the hinting fault at pmd granularity.
 *
 * Non-present, non-file swap ptes holding a write migration entry are
 * downgraded to read-only migration entries.
 *
 * Runs under the pte lock with lazy MMU mode.  Returns the number of ptes
 * that were changed.
 */
static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa, bool *ret_all_same_node)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte, oldpte;
	spinlock_t *ptl;
	unsigned long pages = 0;
	bool all_same_node = true;
	int last_nid = -1;	/* node of the previously examined page, -1 = none yet */

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	do {
		oldpte = *pte;
		if (pte_present(oldpte)) {
			pte_t ptent;
			bool updated = false;

			/* Atomically clear the pte while we modify it. */
			ptent = ptep_modify_prot_start(mm, addr, pte);
			if (!prot_numa) {
				ptent = pte_modify(ptent, newprot);
				updated = true;
			} else {
				struct page *page;

				page = vm_normal_page(vma, addr, oldpte);
				if (page) {
					/* Track whether all pages share one node. */
					int this_nid = page_to_nid(page);
					if (last_nid == -1)
						last_nid = this_nid;
					if (last_nid != this_nid)
						all_same_node = false;

					/* only check non-shared pages */
					if (!pte_numa(oldpte) &&
					    page_mapcount(page) == 1) {
						ptent = pte_mknuma(ptent);
						updated = true;
					}
				}
			}

			/*
			 * Avoid taking write faults for pages we know to be
			 * dirty.
			 */
			if (dirty_accountable && pte_dirty(ptent)) {
				ptent = pte_mkwrite(ptent);
				updated = true;
			}

			if (updated)
				pages++;
			ptep_modify_prot_commit(mm, addr, pte, ptent);
		} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
			swp_entry_t entry = pte_to_swp_entry(oldpte);

			if (is_write_migration_entry(entry)) {
				/*
				 * A protection check is difficult so
				 * just be safe and disable write
				 */
				make_migration_entry_read(&entry);
				set_pte_at(mm, addr, pte,
					swp_entry_to_pte(entry));
			}
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);

	*ret_all_same_node = all_same_node;
	return pages;
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * Mark an entire pmd with the NUMA-hinting bit, under the mm's
 * page_table_lock, so a later access can be handled as a single fault
 * for the whole pmd range instead of one fault per pte.
 */
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	spin_lock(&mm->page_table_lock);
	set_pmd_at(mm, addr & PMD_MASK, pmd, pmd_mknuma(*pmd));
	spin_unlock(&mm->page_table_lock);
}
#else
/* Callers only reach this with prot_numa set, which requires NUMA balancing. */
static inline void change_pmd_protnuma(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmd)
{
	BUG();
}
#endif /* CONFIG_NUMA_BALANCING */

/*
 * Apply the protection change to every pmd below @pud in [addr, end).
 *
 * Transparent huge pmds are either changed in place via change_huge_pmd()
 * when the whole huge page lies inside the range, or split and then handled
 * pte by pte.  Returns the number of ptes (or huge pmds, counted as one)
 * that were updated.
 */
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
		pud_t *pud, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long pages = 0;
	bool all_same_node;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_trans_huge(*pmd)) {
			/* Partial coverage: split the huge page and walk its ptes. */
			if (next - addr != HPAGE_PMD_SIZE)
				split_huge_page_pmd(vma, addr, pmd);
			else if (change_huge_pmd(vma, pmd, addr, newprot,
						 prot_numa)) {
				/* Whole huge pmd changed in place; nothing left to walk. */
				pages++;
				continue;
			}
			/* fall through */
		}
		if (pmd_none_or_clear_bad(pmd))
			continue;
		pages += change_pte_range(vma, pmd, addr, next, newprot,
				 dirty_accountable, prot_numa, &all_same_node);

		/*
		 * If we are changing protections for NUMA hinting faults then
		 * set pmd_numa if the examined pages were all on the same
		 * node. This allows a regular PMD to be handled as one fault
		 * and effectively batches the taking of the PTL
		 */
		if (prot_numa && all_same_node)
			change_pmd_protnuma(vma->vm_mm, addr, pmd);
	} while (pmd++, addr = next, addr != end);

	return pages;
}

/*
 * Apply the protection change to every pmd range mapped below @pgd in
 * [addr, end).  Returns the number of entries that were updated.
 */
static inline unsigned long change_pud_range(struct vm_area_struct *vma,
		pgd_t *pgd, unsigned long addr, unsigned long end,
		pgprot_t newprot, int dirty_accountable, int prot_numa)
{
	unsigned long nr_updated = 0;
	unsigned long boundary;
	pud_t *pudp = pud_offset(pgd, addr);

	do {
		boundary = pud_addr_end(addr, end);
		/* Skip empty entries (clearing any corrupt ones). */
		if (!pud_none_or_clear_bad(pudp))
			nr_updated += change_pmd_range(vma, pudp, addr,
					boundary, newprot, dirty_accountable,
					prot_numa);
	} while (pudp++, addr = boundary, addr != end);

	return nr_updated;
}

/*
 * Walk the page tables of @vma's mm over [addr, end), applying @newprot
 * (or NUMA hinting bits) at every level.  Flushes caches up front and the
 * TLB afterwards, the latter only if something actually changed.  Returns
 * the number of entries updated.
 */
static unsigned long change_protection_range(struct vm_area_struct *vma,
		unsigned long addr, unsigned long end, pgprot_t newprot,
		int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = addr;
	unsigned long nr_updated = 0;
	unsigned long boundary;
	pgd_t *pgdp;

	BUG_ON(addr >= end);
	flush_cache_range(vma, addr, end);
	pgdp = pgd_offset(mm, addr);
	do {
		boundary = pgd_addr_end(addr, end);
		/* Skip empty entries (clearing any corrupt ones). */
		if (!pgd_none_or_clear_bad(pgdp))
			nr_updated += change_pud_range(vma, pgdp, addr,
					boundary, newprot, dirty_accountable,
					prot_numa);
	} while (pgdp++, addr = boundary, addr != end);

	/* Only flush the TLB if we actually modified any entries: */
	if (nr_updated)
		flush_tlb_range(vma, start, end);

	return nr_updated;
}

/*
 * Change the protection of [start, end) within @vma to @newprot,
 * dispatching to the hugetlb helper for hugetlb vmas and to the generic
 * page-table walk otherwise.  The range is bracketed by mmu-notifier
 * invalidation callbacks.  Returns the number of entries updated.
 */
unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end, pgprot_t newprot,
		       int dirty_accountable, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long nr_updated;

	mmu_notifier_invalidate_range_start(mm, start, end);
	nr_updated = is_vm_hugetlb_page(vma)
		? hugetlb_change_protection(vma, start, end, newprot)
		: change_protection_range(vma, start, end, newprot,
					  dirty_accountable, prot_numa);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return nr_updated;
}

int
mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
	unsigned long start, unsigned long end, unsigned long newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long oldflags = vma->vm_flags;
	long nrpages = (end - start) >> PAGE_SHIFT;
	unsigned long charged = 0;
	pgoff_t pgoff;
	int error;
	int dirty_accountable = 0;

	if (newflags == oldflags) {
		*pprev = vma;
		return 0;
	}

	/*
	 * If we make a private mapping writable we increase our commit;
	 * but (without finer accounting) cannot reduce our commit if we
	 * make it unwritable again. hugetlb mapping were accounted for
	 * even if read-only so there is no need to account for them here
	 */
	if (newflags & VM_WRITE) {
		if (!(oldflags & (VM_ACCOUNT|VM_WRITE|VM_HUGETLB|
						VM_SHARED|VM_NORESERVE))) {
			charged = nrpages;
			if (security_vm_enough_memory_mm(mm, charged))
				return -ENOMEM;
			newflags |= VM_ACCOUNT;
		}