/*
 * Intel & MS High Precision Event Timer Implementation.
 *
 * Copyright (C) 2003 Intel Corporation
 *	Venki Pallipadi
 * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
 *	Bob Picco <robert.picco@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>
#include <linux/seq_file.h>
#include <linux/bitops.h>
#include <linux/compat.h>
#include <linux/clocksource.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/io.h>

#include <asm/current.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/div64.h>

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/hpet.h>

/*
 * The High Precision Event Timer driver.
 * This driver is closely modelled after the rtc.c driver.
 * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
 */
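
/*
 * A minimal user-space sketch of the /dev/hpet interface implemented
 * below (illustrative only; assumes the ioctls from <linux/hpet.h>):
 *
 *	int fd = open("/dev/hpet", O_RDONLY);
 *	unsigned long hits;
 *
 *	ioctl(fd, HPET_IRQFREQ, 64);	// request 64 interrupts/sec
 *	ioctl(fd, HPET_IE_ON, 0);	// arm the timer interrupt
 *	read(fd, &hits, sizeof(hits));	// blocks until the timer fires
 *	ioctl(fd, HPET_IE_OFF, 0);
 *	close(fd);
 */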
#define	HPET_USER_FREQ	(64)
#define	HPET_DRIFT	(500)

#define HPET_RANGE_SIZE		1024	/* from HPET spec */


/*
 * WARNING -- don't be confused by the names.  These macros are almost
 * never used to write the (single) main counter, and only rarely to
 * read it; they mostly access the per-timer comparators.  They're
 * badly named -- a rename is overdue.
 */
#if BITS_PER_LONG == 64
#define	write_counter(V, MC)	writeq(V, MC)
#define	read_counter(MC)	readq(MC)
#else
#define	write_counter(V, MC)	writel(V, MC)
#define	read_counter(MC)	readl(MC)
#endif

static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
static void __iomem *hpet_mctr;

static cycle_t read_hpet(struct clocksource *cs)
{
	return (cycle_t)read_counter((void __iomem *)hpet_mctr);
}

static struct clocksource clocksource_hpet = {
	.name		= "hpet",
	.rating		= 250,
	.read		= read_hpet,
	.mask		= CLOCKSOURCE_MASK(64),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *hpet_clocksource;
#endif

/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);

#define	HPET_DEV_NAME	(7)
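/* Seven bytes: "hpet" plus up to two digits and a NUL terminator. */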

struct hpet_dev {
	struct hpets *hd_hpets;
	struct hpet __iomem *hd_hpet;
	struct hpet_timer __iomem *hd_timer;
	unsigned long hd_ireqfreq;
	unsigned long hd_irqdata;
	wait_queue_head_t hd_waitqueue;
	struct fasync_struct *hd_async_queue;
	unsigned int hd_flags;
	unsigned int hd_irq;
	unsigned int hd_hdwirq;
	char hd_name[HPET_DEV_NAME];
};

struct hpets {
	struct hpets *hp_next;
	struct hpet __iomem *hp_hpet;
	unsigned long hp_hpet_phys;
	struct clocksource *hp_clocksource;
	unsigned long long hp_tick_freq;
	unsigned long hp_delta;
	unsigned int hp_ntimer;
	unsigned int hp_which;
	struct hpet_dev hp_dev[1];
};

static struct hpets *hpets;

#define	HPET_OPEN		0x0001
#define	HPET_IE			0x0002	/* interrupt enabled */
#define	HPET_PERIODIC		0x0004
#define	HPET_SHARED_IRQ		0x0008


#ifndef readq
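/*
 * Fallbacks for architectures without native 64-bit MMIO accessors.
 * The two 32-bit halves are accessed separately, which is acceptable
 * here because the driver only applies readq()/writeq() to
 * configuration and capability registers, never to the running main
 * counter (that goes through read_counter()/write_counter() above).
 */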
static inline unsigned long long readq(void __iomem *addr)
{
	return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
	writel(v & 0xffffffff, addr);
	writel(v >> 32, addr + 4);
}
#endif

static irqreturn_t hpet_interrupt(int irq, void *data)
{
	struct hpet_dev *devp;
	unsigned long isr;

	devp = data;
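	/* A timer's index within hp_dev[] is also its bit in the ISR. */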
	isr = 1 << (devp - devp->hd_hpets->hp_dev);

	if ((devp->hd_flags & HPET_SHARED_IRQ) &&
	    !(isr & readl(&devp->hd_hpet->hpet_isr)))
		return IRQ_NONE;

	spin_lock(&hpet_lock);
	devp->hd_irqdata++;

	/*
	 * For non-periodic timers, advance the comparator by one
	 * interval.  This has the effect of making a non-periodic
	 * timer behave like a periodic one.
	 */
	if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
		unsigned long m, t;

		t = devp->hd_ireqfreq;
		m = read_counter(&devp->hd_timer->hpet_compare);
		write_counter(t + m, &devp->hd_timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ)
		writel(isr, &devp->hd_hpet->hpet_isr);
	spin_unlock(&hpet_lock);

	wake_up_interruptible(&devp->hd_waitqueue);

	kill_fasync(&devp->hd_async_queue, SIGIO, POLL_IN);

	return IRQ_HANDLED;
}

static void hpet_timer_set_irq(struct hpet_dev *devp)
{
	unsigned long v;
	int irq, gsi;
	struct hpet_timer __iomem *timer;

	spin_lock_irq(&hpet_lock);
	if (devp->hd_hdwirq) {
		spin_unlock_irq(&hpet_lock);
		return;
	}

	timer = devp->hd_timer;

	/* we prefer level triggered mode */
	v = readl(&timer->hpet_config);
	if (!(v & Tn_INT_TYPE_CNF_MASK)) {
		v |= Tn_INT_TYPE_CNF_MASK;
		writel(v, &timer->hpet_config);
	}
	spin_unlock_irq(&hpet_lock);

	v = (readq(&timer->hpet_config) & Tn_INT_ROUTE_CAP_MASK) >>
				 Tn_INT_ROUTE_CAP_SHIFT;

	/*
	 * In PIC mode, skip IRQ0-4, IRQ6-9 and IRQ12-15, which are always
	 * used by legacy devices.  In IO-APIC mode, skip all of the legacy
	 * IRQs.
	 */
	if (acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		v &= ~0xf3df;
	else
		v &= ~0xffff;

	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
		if (irq >= nr_irqs) {
			irq = HPET_MAX_IRQ;
			break;
		}

		gsi = acpi_register_gsi(NULL, irq, ACPI_LEVEL_SENSITIVE,
					ACPI_ACTIVE_LOW);
		if (gsi > 0)
			break;

		/* FIXME: Setup interrupt source table */
	}

	if (irq < HPET_MAX_IRQ) {
		spin_lock_irq(&hpet_lock);
		v = readl(&timer->hpet_config);
		v |= irq << Tn_INT_ROUTE_CNF_SHIFT;
		writel(v, &timer->hpet_config);
		devp->hd_hdwirq = gsi;
		spin_unlock_irq(&hpet_lock);
	}
	return;
}

static int hpet_open(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpets *hpetp;
	int i;

	if (file->f_mode & FMODE_WRITE)
		return -EINVAL;

	mutex_lock(&hpet_mutex);
	spin_lock_irq(&hpet_lock);

	for (devp = NULL, hpetp = hpets; hpetp && !devp; hpetp = hpetp->hp_next)
		for (i = 0; i < hpetp->hp_ntimer; i++)
			if (!(hpetp->hp_dev[i].hd_flags & HPET_OPEN)) {
				devp = &hpetp->hp_dev[i];
				break;
			}

	if (!devp) {
		spin_unlock_irq(&hpet_lock);
		mutex_unlock(&hpet_mutex);
		return -EBUSY;
	}

	file->private_data = devp;
	devp->hd_irqdata = 0;
	devp->hd_flags |= HPET_OPEN;
	spin_unlock_irq(&hpet_lock);
	mutex_unlock(&hpet_mutex);

	hpet_timer_set_irq(devp);

	return 0;
}

static ssize_t
hpet_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long data;
	ssize_t retval;
	struct hpet_dev *devp;

	devp = file->private_data;
	if (!devp->hd_ireqfreq)
		return -EIO;

	if (count < sizeof(unsigned long))
		return -EINVAL;

	add_wait_queue(&devp->hd_waitqueue, &wait);

	for ( ; ; ) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&hpet_lock);
		data = devp->hd_irqdata;
		devp->hd_irqdata = 0;
		spin_unlock_irq(&hpet_lock);

		if (data)
			break;
		else if (file->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			goto out;
		} else if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			goto out;
		}
		schedule();
	}

	retval = put_user(data, (unsigned long __user *)buf);
	if (!retval)
		retval = sizeof(unsigned long);
out:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&devp->hd_waitqueue, &wait);

	return retval;
}

static unsigned int hpet_poll(struct file *file, poll_table *wait)
{
	unsigned long v;
	struct hpet_dev *devp;

	devp = file->private_data;

	if (!devp->hd_ireqfreq)
		return 0;

	poll_wait(file, &devp->hd_waitqueue, wait);

	spin_lock_irq(&hpet_lock);
	v = devp->hd_irqdata;
	spin_unlock_irq(&hpet_lock);

	if (v != 0)
		return POLLIN | POLLRDNORM;

	return 0;
}

static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef	CONFIG_HPET_MMAP
	struct hpet_dev *devp;
	unsigned long addr;

	if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
		return -EINVAL;

	devp = file->private_data;
	addr = devp->hd_hpets->hp_hpet_phys;

	if (addr & (PAGE_SIZE - 1))
		return -ENOSYS;

	vma->vm_flags |= VM_IO;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
					PAGE_SIZE, vma->vm_page_prot)) {
		printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
			__func__);
		return -EAGAIN;
	}

	return 0;
#else
	return -ENOSYS;
#endif
}

static int hpet_fasync(int fd, struct file *file, int on)
{
	struct hpet_dev *devp;

	devp = file->private_data;

	if (fasync_helper(fd, file, on, &devp->hd_async_queue) >= 0)
		return 0;
	else
		return -EIO;
}

static int hpet_release(struct inode *inode, struct file *file)
{
	struct hpet_dev *devp;
	struct hpet_timer __iomem *timer;
	int irq = 0;

	devp = file->private_data;
	timer = devp->hd_timer;

	spin_lock_irq(&hpet_lock);

	writeq((readq(&timer->hpet_config) & ~Tn_INT_ENB_CNF_MASK),
	       &timer->hpet_config);

	irq = devp->hd_irq;
	devp->hd_irq = 0;

	devp->hd_ireqfreq = 0;

	if (devp->hd_flags & HPET_PERIODIC
	    && readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
		unsigned long v;

		v = readq(&timer->hpet_config);
		v ^= Tn_TYPE_CNF_MASK;
		writeq(v, &timer->hpet_config);
	}

	devp->hd_flags &= ~(HPET_OPEN | HPET_IE | HPET_PERIODIC);
	spin_unlock_irq(&hpet_lock);

	if (irq)
		free_irq(irq, devp);

	file->private_data = NULL;
	return 0;
}

static int hpet_ioctl_ieon(struct hpet_dev *devp)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int irq;
	unsigned long g, v, t, m;
	unsigned long flags, isr;

	timer = devp->hd_timer;
	hpet = devp->hd_hpet;
	hpetp = devp->hd_hpets;

	if (!devp->hd_ireqfreq)
		return -EIO;

	spin_lock_irq(&hpet_lock);

	if (devp->hd_flags & HPET_IE) {
		spin_unlock_irq(&hpet_lock);
		return -EBUSY;
	}

	devp->hd_flags |= HPET_IE;

	if (readl(&timer->hpet_config) & Tn_INT_TYPE_CNF_MASK)
		devp->hd_flags |= HPET_SHARED_IRQ;
	spin_unlock_irq(&hpet_lock);

	irq = devp->hd_hdwirq;

	if (irq) {
		unsigned long irq_flags;

		if (devp->hd_flags & HPET_SHARED_IRQ) {
			/*
			 * To prevent the interrupt handler from seeing an
			 * unwanted interrupt status bit, program the timer
			 * so that it will not fire in the near future ...
			 */
			writel(readl(&timer->hpet_config) & ~Tn_TYPE_CNF_MASK,
			       &timer->hpet_config);
			write_counter(read_counter(&hpet->hpet_mc),
				      &timer->hpet_compare);
			/* ... and clear any left-over status. */
			isr = 1 << (devp - devp->hd_hpets->hp_dev);
			writel(isr, &hpet->hpet_isr);
		}

		sprintf(devp->hd_name, "hpet%d", (int)(devp - hpetp->hp_dev));
		irq_flags = devp->hd_flags & HPET_SHARED_IRQ
						? IRQF_SHARED : IRQF_DISABLED;
		if (request_irq(irq, hpet_interrupt, irq_flags,
				devp->hd_name, (void *)devp)) {
			printk(KERN_ERR "hpet: IRQ %d is not free\n", irq);
			irq = 0;
		}
	}

	if (irq == 0) {
		spin_lock_irq(&hpet_lock);
		devp->hd_flags ^= HPET_IE;
		spin_unlock_irq(&hpet_lock);
		return -EIO;
	}

	devp->hd_irq = irq;
	t = devp->hd_ireqfreq;
	v = readq(&timer->hpet_config);

	/* 64-bit comparators are not yet supported through the ioctls,
	 * so force this into 32-bit mode if it supports both modes
	 */
	g = v | Tn_32MODE_CNF_MASK | Tn_INT_ENB_CNF_MASK;

	if (devp->hd_flags & HPET_PERIODIC) {
		g |= Tn_TYPE_CNF_MASK;
		v |= Tn_TYPE_CNF_MASK | Tn_VAL_SET_CNF_MASK;
		writeq(v, &timer->hpet_config);
		local_irq_save(flags);

		/*
		 * NOTE: First we modify the hidden accumulator
		 * register supported by periodic-capable comparators.
		 * We never want to modify the (single) counter; that
		 * would affect all the comparators. The value written
		 * is the counter value when the first interrupt is due.
		 */
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
		/*
		 * Then we modify the comparator, indicating the period
		 * for subsequent interrupt.
		 */
		write_counter(t, &timer->hpet_compare);
	} else {
		local_irq_save(flags);
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	}

	if (devp->hd_flags & HPET_SHARED_IRQ) {
		isr = 1 << (devp - devp->hd_hpets->hp_dev);
		writel(isr, &hpet->hpet_isr);
	}
	writeq(g, &timer->hpet_config);
	local_irq_restore(flags);

	return 0;
}

/* converts Hz to number of timer ticks */
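/*
 * For example, with hp_tick_freq = 14318180 (a typical 14.318 MHz HPET)
 * and dis = 64 Hz, this returns (14318180 + 32) / 64 = 223722 ticks
 * between interrupts.
 */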
static inline unsigned long hpet_time_div(struct hpets *hpets,
					  unsigned long dis)
{
	unsigned long long m;

	m = hpets->hp_tick_freq + (dis >> 1);
	do_div(m, dis);
	return (unsigned long)m;
}

static int
hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
		  struct hpet_info *info)
{
	struct hpet_timer __iomem *timer;
	struct hpet __iomem *hpet;
	struct hpets *hpetp;
	int err;
	unsigned long v;

	switch (cmd) {
	case HPET_IE_OFF:
	case HPET_INFO:
	case HPET_EPI:
	case HPET_DPI:
	case HPET_IRQFREQ:
		timer = devp->hd_timer;
		hpet = devp->hd_hpet;
		hpetp = devp->hd_hpets;
		break;
	case HPET_IE_ON:
		return hpet_ioctl_ieon(devp);
	default:
		return -EINVAL;
	}

	err = 0;

	switch (cmd) {
	case HPET_IE_OFF:
		if ((devp->hd_flags & HPET_IE) == 0)
			break;
		v = readq(&timer->hpet_config);
		v &= ~Tn_INT_ENB_CNF_MASK;
		writeq(v, &timer->hpet_config);
		if (devp->hd_irq) {
			free_irq(devp->hd_irq, devp);
			devp->hd_irq = 0;
		}
		devp->hd_flags ^= HPET_IE;
		break;
	case HPET_INFO:
		{
			memset(info, 0, sizeof(*info));
			if (devp->hd_ireqfreq)
				info->hi_ireqfreq =
					hpet_time_div(hpetp, devp->hd_ireqfreq);
			info->hi_flags =
			    readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
			info->hi_hpet = hpetp->hp_which;
			info->hi_timer = devp - hpetp->hp_dev;
			break;
		}
	case HPET_EPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		devp->hd_flags |= HPET_PERIODIC;
		break;
	case HPET_DPI:
		v = readq(&timer->hpet_config);
		if ((v & Tn_PER_INT_CAP_MASK) == 0) {
			err = -ENXIO;
			break;
		}
		if (devp->hd_flags & HPET_PERIODIC &&
		    readq(&timer->hpet_config) & Tn_TYPE_CNF_MASK) {
			v = readq(&timer->hpet_config);
			v ^= Tn_TYPE_CNF_MASK;
			writeq(v, &timer->hpet_config);
		}
		devp->hd_flags &= ~HPET_PERIODIC;
		break;
	case HPET_IRQFREQ:
		if ((arg > hpet_max_freq) &&
		    !capable(CAP_SYS_RESOURCE)) {
			err = -EACCES;
			break;
		}

		if (!arg) {
			err = -EINVAL;
			break;
		}

		devp->hd_ireqfreq = hpet_time_div(hpetp, arg);
	}

	return err;
}

static long
hpet_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err &&
	    (copy_to_user((void __user *)arg, &info, sizeof(info))))
		err = -EFAULT;

	return err;
}

#ifdef CONFIG_COMPAT
struct compat_hpet_info {
	compat_ulong_t hi_ireqfreq;	/* Hz */
	compat_ulong_t hi_flags;	/* information */
	unsigned short hi_hpet;
	unsigned short hi_timer;
};

static long
hpet_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct hpet_info info;
	int err;

	mutex_lock(&hpet_mutex);
	err = hpet_ioctl_common(file->private_data, cmd, arg, &info);
	mutex_unlock(&hpet_mutex);

	if ((cmd == HPET_INFO) && !err) {
		struct compat_hpet_info __user *u = compat_ptr(arg);
		if (put_user(info.hi_ireqfreq, &u->hi_ireqfreq) ||
		    put_user(info.hi_flags, &u->hi_flags) ||
		    put_user(info.hi_hpet, &u->hi_hpet) ||
		    put_user(info.hi_timer, &u->hi_timer))
			err = -EFAULT;
	}

	return err;
}
#endif

static const struct file_operations hpet_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = hpet_read,
	.poll = hpet_poll,
	.unlocked_ioctl = hpet_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpet_compat_ioctl,
#endif
	.open = hpet_open,
	.release = hpet_release,
	.fasync = hpet_fasync,
	.mmap = hpet_mmap,
};

static int hpet_is_known(struct hpet_data *hdp)
{
	struct hpets *hpetp;

	for (hpetp = hpets; hpetp; hpetp = hpetp->hp_next)
		if (hpetp->hp_hpet_phys == hdp->hd_phys_address)
			return 1;

	return 0;
}

static ctl_table hpet_table[] = {
	{
	 .procname = "max-user-freq",
	 .data = &hpet_max_freq,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = proc_dointvec,
	 },
	{}
};

static ctl_table hpet_root[] = {
	{
	 .procname = "hpet",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_table,
	 },
	{}
};

static ctl_table dev_root[] = {
	{
	 .procname = "dev",
	 .maxlen = 0,
	 .mode = 0555,
	 .child = hpet_root,
	 },
	{}
};
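
/* Together, the tables above publish /proc/sys/dev/hpet/max-user-freq. */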

static struct ctl_table_header *sysctl_header;

/*
 * Adjustment for when arming the timer with
 * initial conditions.  That is, main counter
 * ticks expired before interrupts are enabled.
 */
#define	TICK_CALIBRATE	(1000UL)

static unsigned long __hpet_calibrate(struct hpets *hpetp)
{
	struct hpet_timer __iomem *timer = NULL;
	unsigned long t, m, count, i, flags, start;
	struct hpet_dev *devp;
	int j;
	struct hpet __iomem *hpet;

	for (j = 0, devp = hpetp->hp_dev; j < hpetp->hp_ntimer; j++, devp++)
		if ((devp->hd_flags & HPET_OPEN) == 0) {
			timer = devp->hd_timer;
			break;
		}

	if (!timer)
		return 0;

	hpet = hpetp->hp_hpet;
	t = read_counter(&timer->hpet_compare);

	i = 0;
	count = hpet_time_div(hpetp, TICK_CALIBRATE);

	local_irq_save(flags);

	start = read_counter(&hpet->hpet_mc);

	do {
		m = read_counter(&hpet->hpet_mc);
		write_counter(t + m + hpetp->hp_delta, &timer->hpet_compare);
	} while (i++, (m - start) < count);

	local_irq_restore(flags);

	return (m - start) / i;
}

static unsigned long hpet_calibrate(struct hpets *hpetp)
{
	unsigned long ret = -1;
	unsigned long tmp;

	/*
	 * Repeat the calibration until the result stabilizes at a small
	 * value.  If an SMI interrupts a calibration pass, that pass
	 * returns an inflated value; iterating until the result stops
	 * shrinking filters such outliers out.
	 */
	for ( ; ; ) {
		tmp = __hpet_calibrate(hpetp);
		if (ret <= tmp)
			break;
		ret = tmp;
	}

	return ret;
}

int hpet_alloc(struct hpet_data *hdp)
{
	u64 cap, mcfg;
	struct hpet_dev *devp;
	u32 i, ntimer;
	struct hpets *hpetp;
	size_t siz;
	struct hpet __iomem *hpet;
	static struct hpets *last;
	unsigned long period;
	unsigned long long temp;
	u32 remainder;

	/*
	 * hpet_alloc can be called by platform-dependent code.
	 * If that code has already allocated the HPET that ACPI
	 * also reports, we catch the duplicate here.
	 */
	if (hpet_is_known(hdp)) {
		printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
			__func__);
		return 0;
	}

	siz = sizeof(struct hpets) + ((hdp->hd_nirqs - 1) *
				      sizeof(struct hpet_dev));

	hpetp = kzalloc(siz, GFP_KERNEL);

	if (!hpetp)
		return -ENOMEM;

	hpetp->hp_which = hpet_nhpet++;
	hpetp->hp_hpet = hdp->hd_address;
	hpetp->hp_hpet_phys = hdp->hd_phys_address;

	hpetp->hp_ntimer = hdp->hd_nirqs;

	for (i = 0; i < hdp->hd_nirqs; i++)
		hpetp->hp_dev[i].hd_hdwirq = hdp->hd_irq[i];

	hpet = hpetp->hp_hpet;

	cap = readq(&hpet->hpet_cap);

	ntimer = ((cap & HPET_NUM_TIM_CAP_MASK) >> HPET_NUM_TIM_CAP_SHIFT) + 1;

	if (hpetp->hp_ntimer != ntimer) {
		printk(KERN_WARNING "hpet: number of IRQs doesn't agree"
		       " with number of timers\n");
		kfree(hpetp);
		return -ENODEV;
	}

	if (last)
		last->hp_next = hpetp;
	else
		hpets = hpetp;

	last = hpetp;

	period = (cap & HPET_COUNTER_CLK_PERIOD_MASK) >>
		HPET_COUNTER_CLK_PERIOD_SHIFT; /* fs, 10^-15 */
	temp = 1000000000000000uLL; /* 10^15 femtoseconds per second */
	temp += period >> 1; /* round */
	do_div(temp, period);
	hpetp->hp_tick_freq = temp; /* ticks per second */
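	/*
	 * For example, a period of 69841279 fs (common on Intel chipsets)
	 * gives 10^15 / 69841279 ~= 14318180 ticks per second, i.e. the
	 * familiar 14.318 MHz.
	 */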

	printk(KERN_INFO "hpet%d: at MMIO 0x%lx, IRQ%s",
		hpetp->hp_which, hdp->hd_phys_address,
		hpetp->hp_ntimer > 1 ? "s" : "");
	for (i = 0; i < hpetp->hp_ntimer; i++)
		printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
	printk("\n");

	temp = hpetp->hp_tick_freq;
	remainder = do_div(temp, 1000000);
	printk(KERN_INFO
		"hpet%u: %u comparators, %d-bit %u.%06u MHz counter\n",
		hpetp->hp_which, hpetp->hp_ntimer,
		cap & HPET_COUNTER_SIZE_MASK ? 64 : 32,
		(unsigned) temp, remainder);

	mcfg = readq(&hpet->hpet_config);
	if ((mcfg & HPET_ENABLE_CNF_MASK) == 0) {
		write_counter(0L, &hpet->hpet_mc);
		mcfg |= HPET_ENABLE_CNF_MASK;
		writeq(mcfg, &hpet->hpet_config);
	}

	for (i = 0, devp = hpetp->hp_dev; i < hpetp->hp_ntimer; i++, devp++) {
		struct hpet_timer __iomem *timer;

		timer = &hpet->hpet_timers[devp - hpetp->hp_dev];

		devp->hd_hpets = hpetp;
		devp->hd_hpet = hpet;
		devp->hd_timer = timer;

		/*
		 * If the timer was reserved by platform code,
		 * make it unavailable for opens.
		 */
		if (hdp->hd_state & (1 << i)) {
			devp->hd_flags = HPET_OPEN;
			continue;
		}

		init_waitqueue_head(&devp->hd_waitqueue);
	}

	hpetp->hp_delta = hpet_calibrate(hpetp);

/* This clocksource driver currently only works on ia64 */
#ifdef CONFIG_IA64
	if (!hpet_clocksource) {
		hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
		CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr);
		clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
		hpetp->hp_clocksource = &clocksource_hpet;
		hpet_clocksource = &clocksource_hpet;
	}
#endif

	return 0;
}

static acpi_status hpet_resources(struct acpi_resource *res, void *data)
{
	struct hpet_data *hdp;
	acpi_status status;
	struct acpi_resource_address64 addr;

	hdp = data;

	status = acpi_resource_to_address64(res, &addr);

	if (ACPI_SUCCESS(status)) {
		hdp->hd_phys_address = addr.minimum;
		hdp->hd_address = ioremap(addr.minimum, addr.address_length);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
		struct acpi_resource_fixed_memory32 *fixmem32;

		fixmem32 = &res->data.fixed_memory32;
		if (!fixmem32)
			return AE_NO_MEMORY;

		hdp->hd_phys_address = fixmem32->address;
		hdp->hd_address = ioremap(fixmem32->address,
						HPET_RANGE_SIZE);

		if (hpet_is_known(hdp)) {
			iounmap(hdp->hd_address);
			return AE_ALREADY_EXISTS;
		}
	} else if (res->type == ACPI_RESOURCE_TYPE_EXTENDED_IRQ) {
		struct acpi_resource_extended_irq *irqp;
		int i, irq;

		irqp = &res->data.extended_irq;

		for (i = 0; i < irqp->interrupt_count; i++) {
			irq = acpi_register_gsi(NULL, irqp->interrupts[i],
				      irqp->triggering, irqp->polarity);
			if (irq < 0)
				return AE_ERROR;

			hdp->hd_irq[hdp->hd_nirqs] = irq;
			hdp->hd_nirqs++;
		}
	}

	return AE_OK;
}

static int hpet_acpi_add(struct acpi_device *device)
{
	acpi_status result;
	struct hpet_data data;

	memset(&data, 0, sizeof(data));

	result =
	    acpi_walk_resources(device->handle, METHOD_NAME__CRS,
				hpet_resources, &data);

	if (ACPI_FAILURE(result))
		return -ENODEV;

	if (!data.hd_address || !data.hd_nirqs) {
		if (data.hd_address)
			iounmap(data.hd_address);
		printk("%s: no address or irqs in _CRS\n", __func__);
		return -ENODEV;
	}

	return hpet_alloc(&data);
}

static int hpet_acpi_remove(struct acpi_device *device, int type)
{
	/* XXX need to unregister clocksource, dealloc mem, etc */
	return -EINVAL;
}

static const struct acpi_device_id hpet_device_ids[] = {
	{"PNP0103", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, hpet_device_ids);

static struct acpi_driver hpet_acpi_driver = {
	.name = "hpet",
	.ids = hpet_device_ids,
	.ops = {
		.add = hpet_acpi_add,
		.remove = hpet_acpi_remove,
		},
};

static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops };

static int __init hpet_init(void)
{
	int result;

	result = misc_register(&hpet_misc);
	if (result < 0)
		return -ENODEV;

	sysctl_header = register_sysctl_table(dev_root);

	result = acpi_bus_register_driver(&hpet_acpi_driver);
	if (result < 0) {
		if (sysctl_header)
			unregister_sysctl_table(sysctl_header);
		misc_deregister(&hpet_misc);
		return result;
	}

	return 0;
}

static void __exit hpet_exit(void)
{
	acpi_bus_unregister_driver(&hpet_acpi_driver);

	if (sysctl_header)
		unregister_sysctl_table(sysctl_header);
	misc_deregister(&hpet_misc);

	return;
}

module_init(hpet_init);
module_exit(hpet_exit);
MODULE_AUTHOR("Bob Picco <Robert.Picco@hp.com>");
MODULE_LICENSE("GPL");
~mode) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * @check_acl: optional callback to check for Posix ACLs * @flags: IPERM_FLAG_ flags. * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask, unsigned int flags, int (*check_acl)(struct inode *inode, int mask, unsigned int flags)) { int ret; /* * Do the basic POSIX ACL permission checks. */ ret = acl_permission_check(inode, mask, flags, check_acl); if (ret != -EACCES) return ret; /* * Read/write DACs are always overridable. * Executable DACs are overridable for all directories and * for non-directories that have least one exec bit set. */ if (!(mask & MAY_EXEC) || execute_ok(inode)) if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /** * inode_permission - check for access rights to a given inode * @inode: inode to check permission on * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on an inode. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. */ int inode_permission(struct inode *inode, int mask) { int retval; if (mask & MAY_WRITE) { umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. */ if (IS_RDONLY(inode) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; /* * Nobody gets write access to an immutable file. */ if (IS_IMMUTABLE(inode)) return -EACCES; } if (inode->i_op->permission) retval = inode->i_op->permission(inode, mask, 0); else retval = generic_permission(inode, mask, 0, inode->i_op->check_acl); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } /** * file_permission - check for additional access rights to a given file * @file: file to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on an already opened * file. * * Note: * Do not use this function in new code. All access checks should * be done using inode_permission(). */ int file_permission(struct file *file, int mask) { return inode_permission(file->f_path.dentry->d_inode, mask); } /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. * This is used for regular files. * We cannot support write (and maybe mmap read-write shared) accesses and * MAP_DENYWRITE mmappings simultaneously. 
The i_writecount field of an inode * can have the following values: * 0: no writers, no VM_DENYWRITE mappings * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist * > 0: (i_writecount) users are writing to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to * use {get,deny}_write_access() - these functions check the sign and refuse * to do the change if sign is wrong. Exclusion between them is provided by * the inode->i_lock spinlock. */ int get_write_access(struct inode * inode) { spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) < 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_inc(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } int deny_write_access(struct file * file) { struct inode *inode = file->f_path.dentry->d_inode; spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) > 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_dec(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. */ void path_get(struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. */ void path_put(struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); /* * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). In situations when we can't * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab * normal reference counts on dentries and vfsmounts to transition to rcu-walk * mode. Refcounts are grabbed at the last known good point before rcu-walk * got stuck, so ref-walk may continue from there. If this is not successful * (eg. a seqcount has changed), then failure is returned and it's up to caller * to restart the path walk from the beginning in ref-walk mode. */ /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry or NULL * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd or NULL. Must be called from rcu-walk context. */ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) { struct fs_struct *fs = current->fs; struct dentry *parent = nd->path.dentry; int want_root = 0; BUG_ON(!(nd->flags & LOOKUP_RCU)); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { want_root = 1; spin_lock(&fs->lock); if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry) goto err_root; } spin_lock(&parent->d_lock); if (!dentry) { if (!__d_rcu_to_refcount(parent, nd->seq)) goto err_parent; BUG_ON(nd->inode != parent->d_inode); } else { spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); if (!__d_rcu_to_refcount(dentry, nd->seq)) goto err_child; /* * If the sequence check on the child dentry passed, then * the child has not been removed from its parent. This * means the parent dentry must be valid and able to take * a reference at this point. 
*/ BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); BUG_ON(!parent->d_count); parent->d_count++; spin_unlock(&dentry->d_lock); } spin_unlock(&parent->d_lock); if (want_root) { path_get(&nd->root); spin_unlock(&fs->lock); } mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); nd->flags &= ~LOOKUP_RCU; return 0; err_child: spin_unlock(&dentry->d_lock); err_parent: spin_unlock(&parent->d_lock); err_root: if (want_root) spin_unlock(&fs->lock); return -ECHILD; } /** * release_open_intent - free up open intent resources * @nd: pointer to nameidata */ void release_open_intent(struct nameidata *nd) { struct file *file = nd->intent.open.file; if (file && !IS_ERR(file)) { if (file->f_path.dentry == NULL) put_filp(file); else fput(file); } } static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd) { return dentry->d_op->d_revalidate(dentry, nd); } static struct dentry * do_revalidate(struct dentry *dentry, struct nameidata *nd) { int status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { /* * The dentry failed validation. * If d_revalidate returned 0 attempt to invalidate * the dentry otherwise d_revalidate is asking us * to return a fail status. */ if (status < 0) { dput(dentry); dentry = ERR_PTR(status); } else if (!d_invalidate(dentry)) { dput(dentry); dentry = NULL; } } return dentry; } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. */ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; spin_lock(&dentry->d_lock); if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { spin_unlock(&dentry->d_lock); rcu_read_unlock(); br_read_unlock(vfsmount_lock); return -ECHILD; } BUG_ON(nd->inode != dentry->d_inode); spin_unlock(&dentry->d_lock); mntget(nd->path.mnt); rcu_read_unlock(); br_read_unlock(vfsmount_lock); } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) return 0; if (likely(!(dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT))) return 0; /* Note: we do not d_invalidate() */ status = d_revalidate(dentry, nd); if (status > 0) return 0; if (!status) status = -ESTALE; path_put(&nd->path); return status; } /* * Short-cut version of permission(), for calling on directories * during pathname resolution. Combines parts of permission() * and generic_permission(), and tests ONLY for MAY_EXEC permission. * * If appropriate, check DAC only. If not appropriate, or * short-cut DAC fails, then call ->permission() to do more * complete permission check. 
*/ static inline int exec_permission(struct inode *inode, unsigned int flags) { int ret; struct user_namespace *ns = inode_userns(inode); if (inode->i_op->permission) { ret = inode->i_op->permission(inode, MAY_EXEC, flags); } else { ret = acl_permission_check(inode, MAY_EXEC, flags, inode->i_op->check_acl); } if (likely(!ret)) goto ok; if (ret == -ECHILD) return ret; if (ns_capable(ns, CAP_DAC_OVERRIDE) || ns_capable(ns, CAP_DAC_READ_SEARCH)) goto ok; return ret; ok: return security_inode_exec_permission(inode, flags); } static __always_inline void set_root(struct nameidata *nd) { if (!nd->root.mnt) get_fs_root(current->fs, &nd->root); } static int link_path_walk(const char *, struct nameidata *); static __always_inline void set_root_rcu(struct nameidata *nd) { if (!nd->root.mnt) { struct fs_struct *fs = current->fs; unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } } static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) { int ret; if (IS_ERR(link)) goto fail; if (*link == '/') { set_root(nd); path_put(&nd->path); nd->path = nd->root; path_get(&nd->root); nd->flags |= LOOKUP_JUMPED; } nd->inode = nd->path.dentry->d_inode; ret = link_path_walk(link, nd); return ret; fail: path_put(&nd->path); return PTR_ERR(link); } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static inline void put_link(struct nameidata *nd, struct path *link, void *cookie) { struct inode *inode = link->dentry->d_inode; if (!IS_ERR(cookie) && inode->i_op->put_link) inode->i_op->put_link(link->dentry, nd, cookie); path_put(link); } static __always_inline int follow_link(struct path *link, struct nameidata *nd, void **p) { int error; struct dentry *dentry = link->dentry; BUG_ON(nd->flags & LOOKUP_RCU); if (link->mnt == nd->path.mnt) mntget(link->mnt); if (unlikely(current->total_link_count >= 40)) { *p = ERR_PTR(-ELOOP); /* no ->put_link(), please */ path_put(&nd->path); return -ELOOP; } cond_resched(); current->total_link_count++; touch_atime(link->mnt, dentry); nd_set_link(nd, NULL); error = security_inode_follow_link(link->dentry, nd); if (error) { *p = ERR_PTR(error); /* no ->put_link(), please */ path_put(&nd->path); return error; } nd->last_type = LAST_BIND; *p = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(*p); if (!IS_ERR(*p)) { char *s = nd_get_link(nd); error = 0; if (s) error = __vfs_follow_link(nd, s); else if (nd->last_type == LAST_BIND) { nd->flags |= LOOKUP_JUMPED; nd->inode = nd->path.dentry->d_inode; if (nd->inode->i_op->follow_link) { /* stepped on a _really_ weird one */ path_put(&nd->path); error = -ELOOP; } } } return error; } static int follow_up_rcu(struct path *path) { struct vfsmount *parent; struct dentry *mountpoint; parent = path->mnt->mnt_parent; if (parent == path->mnt) return 0; mountpoint = path->mnt->mnt_mountpoint; path->dentry = mountpoint; path->mnt = parent; return 1; } int follow_up(struct path *path) { struct vfsmount *parent; struct dentry *mountpoint; br_read_lock(vfsmount_lock); parent = path->mnt->mnt_parent; if (parent == path->mnt) { 
br_read_unlock(vfsmount_lock); return 0; } mntget(parent); mountpoint = dget(path->mnt->mnt_mountpoint); br_read_unlock(vfsmount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = parent; return 1; } /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, unsigned flags, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT * and this is the terminal part of the path. */ if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_CONTINUE)) return -EISDIR; /* we actually want to stop here */ /* We want to mount if someone is trying to open/create a file of any * type under the mountpoint, wants to traverse through the mountpoint * or wants to open the mounted directory. * * We don't want to mount if someone's just doing a stat and they've * set AT_SYMLINK_NOFOLLOW - unless they're stat'ing a directory and * appended a '/' to the name. */ if (!(flags & LOOKUP_FOLLOW) && !(flags & (LOOKUP_CONTINUE | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE))) return -EISDIR; current->total_link_count++; if (current->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. */ if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_CONTINUE)) return -EREMOTE; return PTR_ERR(mnt); } if (!mnt) /* mount collision */ return 0; if (!*need_mntput) { /* lock_mount() may release path->mnt on error */ mntget(path->mnt); *need_mntput = true; } err = finish_automount(mnt, path); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ return 0; case 0: path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); return 0; default: return err; } } /* * Handle a dentry that is managed in some way. * - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * * This may only be called in refwalk mode. * * Serialization is taken care of in namespace.c */ static int follow_managed(struct path *path, unsigned flags) { struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned managed; bool need_mntput = false; int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path->dentry, false); if (ret < 0) break; } /* Transit to a mounted filesystem. 
*/ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before we managed to get the * vfsmount_lock */ } /* Handle an automount point */ if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, flags, &need_mntput); if (ret < 0) break; continue; } /* We didn't change the current path point */ break; } if (need_mntput && path->mnt == mnt) mntput(path->mnt); if (ret == -EISDIR) ret = 0; return ret; } int follow_down_one(struct path *path) { struct vfsmount *mounted; mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); return 1; } return 0; } static inline bool managed_dentry_might_block(struct dentry *dentry) { return (dentry->d_flags & DCACHE_MANAGE_TRANSIT && dentry->d_op->d_manage(dentry, true) < 0); } /* * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if * we meet a managed dentry that would need blocking. */ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode) { for (;;) { struct vfsmount *mounted; /* * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. */ *inode = path->dentry->d_inode; if (unlikely(managed_dentry_might_block(path->dentry))) return false; if (!d_mountpoint(path->dentry)) break; mounted = __lookup_mnt(path->mnt, path->dentry, 1); if (!mounted) break; path->mnt = mounted; path->dentry = mounted->mnt_root; nd->seq = read_seqcount_begin(&path->dentry->d_seq); } return true; } static void follow_mount_rcu(struct nameidata *nd) { while (d_mountpoint(nd->path.dentry)) { struct vfsmount *mounted; mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry, 1); if (!mounted) break; nd->path.mnt = mounted; nd->path.dentry = mounted->mnt_root; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } } static int follow_dotdot_rcu(struct nameidata *nd) { set_root_rcu(nd); while (1) { if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } if (nd->path.dentry != nd->path.mnt->mnt_root) { struct dentry *old = nd->path.dentry; struct dentry *parent = old->d_parent; unsigned seq; seq = read_seqcount_begin(&parent->d_seq); if (read_seqcount_retry(&old->d_seq, nd->seq)) goto failed; nd->path.dentry = parent; nd->seq = seq; break; } if (!follow_up_rcu(&nd->path)) break; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } follow_mount_rcu(nd); nd->inode = nd->path.dentry->d_inode; return 0; failed: nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); br_read_unlock(vfsmount_lock); return -ECHILD; } /* * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. */ int follow_down(struct path *path) { unsigned managed; int ret; while (managed = ACCESS_ONCE(path->dentry->d_flags), unlikely(managed & DCACHE_MANAGED_DENTRY)) { /* Allow the filesystem to manage the transit without i_mutex * being held. * * We indicate to the filesystem if someone is trying to mount * something here. 
This gives autofs the chance to deny anyone * other than its daemon the right to mount on its * superstructure. * * The filesystem may sleep at this point. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage( path->dentry, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); continue; } /* Don't handle automount points here */ break; } return 0; } /* * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() */ static void follow_mount(struct path *path) { while (d_mountpoint(path->dentry)) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); } } static void follow_dotdot(struct nameidata *nd) { set_root(nd); while(1) { struct dentry *old = nd->path.dentry; if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } if (nd->path.dentry != nd->path.mnt->mnt_root) { /* rare case of legitimate dget_parent()... */ nd->path.dentry = dget_parent(nd->path.dentry); dput(old); break; } if (!follow_up(&nd->path)) break; } follow_mount(&nd->path); nd->inode = nd->path.dentry->d_inode; } /* * Allocate a dentry with name and parent, and perform a parent * directory ->lookup on it. Returns the new dentry, or ERR_PTR * on error. parent->d_inode->i_mutex must be held. d_lookup must * have verified that no child exists while under i_mutex. */ static struct dentry *d_alloc_and_lookup(struct dentry *parent, struct qstr *name, struct nameidata *nd) { struct inode *inode = parent->d_inode; struct dentry *dentry; struct dentry *old; /* Don't create child dentry for a dead directory. */ if (unlikely(IS_DEADDIR(inode))) return ERR_PTR(-ENOENT); dentry = d_alloc(parent, name); if (unlikely(!dentry)) return ERR_PTR(-ENOMEM); old = inode->i_op->lookup(inode, dentry, nd); if (unlikely(old)) { dput(dentry); dentry = old; } return dentry; } /* * It's more convoluted than I'd like it to be, but... it's still fairly * small and for now I'd prefer to have fast path as straight as possible. * It _is_ time-critical. */ static int do_lookup(struct nameidata *nd, struct qstr *name, struct path *path, struct inode **inode) { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; int need_reval = 1; int status = 1; int err; /* * Rename seqlock is not required here because in the off chance * of a false negative due to a concurrent rename, we're going to * do the non-racy lookup, below. 
*/ if (nd->flags & LOOKUP_RCU) { unsigned seq; *inode = nd->inode; dentry = __d_lookup_rcu(parent, name, &seq, inode); if (!dentry) goto unlazy; /* Memory barrier in read_seqcount_begin of child is enough */ if (__read_seqcount_retry(&parent->d_seq, nd->seq)) return -ECHILD; nd->seq = seq; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) { status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { if (status != -ECHILD) need_reval = 0; goto unlazy; } } path->mnt = mnt; path->dentry = dentry; if (unlikely(!__follow_mount_rcu(nd, path, inode))) goto unlazy; if (unlikely(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT)) goto unlazy; return 0; unlazy: if (unlazy_walk(nd, dentry)) return -ECHILD; } else { dentry = __d_lookup(parent, name); } retry: if (unlikely(!dentry)) { struct inode *dir = parent->d_inode; BUG_ON(nd->inode != dir); mutex_lock(&dir->i_mutex); dentry = d_lookup(parent, name); if (likely(!dentry)) { dentry = d_alloc_and_lookup(parent, name, nd); if (IS_ERR(dentry)) { mutex_unlock(&dir->i_mutex); return PTR_ERR(dentry); } /* known good */ need_reval = 0; status = 1; } mutex_unlock(&dir->i_mutex); } if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE) && need_reval) status = d_revalidate(dentry, nd); if (unlikely(status <= 0)) { if (status < 0) { dput(dentry); return status; } if (!d_invalidate(dentry)) { dput(dentry); dentry = NULL; need_reval = 1; goto retry; } } path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd->flags); if (unlikely(err < 0)) { path_put_conditional(path, nd); return err; } *inode = path->dentry->d_inode; return 0; } static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { int err = exec_permission(nd->inode, IPERM_FLAG_RCU); if (err != -ECHILD) return err; if (unlazy_walk(nd, NULL)) return -ECHILD; } return exec_permission(nd->inode, 0); } static inline int handle_dots(struct nameidata *nd, int type) { if (type == LAST_DOTDOT) { if (nd->flags & LOOKUP_RCU) { if (follow_dotdot_rcu(nd)) return -ECHILD; } else follow_dotdot(nd); } return 0; } static void terminate_walk(struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { path_put(&nd->path); } else { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); br_read_unlock(vfsmount_lock); } } static inline int walk_component(struct nameidata *nd, struct path *path, struct qstr *name, int type, int follow) { struct inode *inode; int err; /* * "." and ".." are special - ".." especially so because it has * to be able to know about the current root directory and * parent relationships. */ if (unlikely(type != LAST_NORM)) return handle_dots(nd, type); err = do_lookup(nd, name, path, &inode); if (unlikely(err)) { terminate_walk(nd); return err; } if (!inode) { path_to_nameidata(path, nd); terminate_walk(nd); return -ENOENT; } if (unlikely(inode->i_op->follow_link) && follow) { if (nd->flags & LOOKUP_RCU) { if (unlikely(unlazy_walk(nd, path->dentry))) { terminate_walk(nd); return -ECHILD; } } BUG_ON(inode != path->dentry->d_inode); return 1; } path_to_nameidata(path, nd); nd->inode = inode; return 0; } /* * This limits recursive symlink follows to 8, while * limiting consecutive symlinks to 40. * * Without that kind of total limit, nasty chains of consecutive * symlinks can cause almost arbitrarily long lookups. 
 */
static inline int nested_symlink(struct path *path, struct nameidata *nd)
{
	int res;

	if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
		path_put_conditional(path, nd);
		path_put(&nd->path);
		return -ELOOP;
	}
	BUG_ON(nd->depth >= MAX_NESTED_LINKS);

	nd->depth++;
	current->link_count++;

	do {
		struct path link = *path;
		void *cookie;

		res = follow_link(&link, nd, &cookie);
		if (!res)
			res = walk_component(nd, path, &nd->last,
					     nd->last_type, LOOKUP_FOLLOW);
		put_link(nd, &link, cookie);
	} while (res > 0);

	current->link_count--;
	nd->depth--;
	return res;
}

/*
 * Name resolution.
 * This is the basic name resolution function, turning a pathname into
 * the final dentry. We expect 'base' to be positive and a directory.
 *
 * Returns 0 and nd will have valid dentry and mnt on success.
 * Returns error and drops reference to input namei data on failure.
 */
static int link_path_walk(const char *name, struct nameidata *nd)
{
	struct path next;
	int err;
	unsigned int lookup_flags = nd->flags;

	while (*name == '/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for (;;) {
		unsigned long hash;
		struct qstr this;
		unsigned int c;
		int type;

		nd->flags |= LOOKUP_CONTINUE;

		err = may_lookup(nd);
		if (err)
			break;

		this.name = name;
		c = *(const unsigned char *)name;

		hash = init_name_hash();
		do {
			name++;
			hash = partial_name_hash(c, hash);
			c = *(const unsigned char *)name;
		} while (c && (c != '/'));
		this.len = name - (const char *) this.name;
		this.hash = end_name_hash(hash);

		type = LAST_NORM;
		if (this.name[0] == '.') switch (this.len) {
			case 2:
				if (this.name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				err = parent->d_op->d_hash(parent, nd->inode,
							   &this);
				if (err < 0)
					break;
			}
		}

		/* remove trailing slashes? */
		if (!c)
			goto last_component;
		while (*++name == '/');
		if (!*name)
			goto last_component;

		err = walk_component(nd, &next, &this, type, LOOKUP_FOLLOW);
		if (err < 0)
			return err;

		if (err) {
			err = nested_symlink(&next, nd);
			if (err)
				return err;
		}
		err = -ENOTDIR;
		if (!nd->inode->i_op->lookup)
			break;
		continue;
		/* here ends the main loop */

last_component:
		/* Clear LOOKUP_CONTINUE iff it was previously unset */
		nd->flags &= lookup_flags | ~LOOKUP_CONTINUE;
		nd->last = this;
		nd->last_type = type;
		return 0;
	}
	terminate_walk(nd);
	return err;
}

static int path_init(int dfd, const char *name, unsigned int flags,
		     struct nameidata *nd, struct file **fp)
{
	int retval = 0;
	int fput_needed;
	struct file *file;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		struct inode *inode = nd->root.dentry->d_inode;
		if (*name) {
			if (!inode->i_op->lookup)
				return -ENOTDIR;
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return retval;
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
		}
		return 0;
	}

	nd->root.mnt = NULL;

	if (*name == '/') {
		if (flags & LOOKUP_RCU) {
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
			set_root_rcu(nd);
		} else {
			set_root(nd);
			path_get(&nd->root);
		}
		nd->path = nd->root;
	} else if (dfd == AT_FDCWD) {
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			br_read_lock(vfsmount_lock);
			rcu_read_lock();

			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
		}
	} else {
		struct dentry *dentry;

		file = fget_raw_light(dfd, &fput_needed);
		retval = -EBADF;
		if (!file)
			goto out_fail;

		dentry = file->f_path.dentry;

		if (*name) {
			retval = -ENOTDIR;
			if (!S_ISDIR(dentry->d_inode->i_mode))
				goto fput_fail;

			retval = file_permission(file, MAY_EXEC);
			if (retval)
				goto fput_fail;
		}

		nd->path = file->f_path;
		if (flags & LOOKUP_RCU) {
			if (fput_needed)
				*fp = file;
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			br_read_lock(vfsmount_lock);
			rcu_read_lock();
		} else {
			path_get(&file->f_path);
			fput_light(file, fput_needed);
		}
	}

	nd->inode = nd->path.dentry->d_inode;
	return 0;

fput_fail:
	fput_light(file, fput_needed);
out_fail:
	return retval;
}

static inline int lookup_last(struct nameidata *nd, struct path *path)
{
	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

	nd->flags &= ~LOOKUP_PARENT;
	return walk_component(nd, path, &nd->last, nd->last_type,
					nd->flags & LOOKUP_FOLLOW);
}

/* Returns 0 and nd will be valid on success; Returns error, otherwise. */
static int path_lookupat(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	struct file *base = NULL;
	struct path path;
	int err;

	/*
	 * Path walking is largely split up into 2 different synchronisation
	 * schemes, rcu-walk and ref-walk (explained in
	 * Documentation/filesystems/path-lookup.txt). These share much of the
	 * path walk code, but some things particularly setup, cleanup, and
	 * following mounts are sufficiently divergent that functions are
	 * duplicated. Typically there is a function foo(), and its RCU
	 * analogue, foo_rcu().
	 *
	 * -ECHILD is the error number of choice (just to avoid clashes) that
	 * is returned if some aspect of an rcu-walk fails. Such an error must
	 * be handled by restarting a traditional ref-walk (which will always
	 * be able to complete).
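	 *
	 * For illustration, the resulting caller-side pattern (see
	 * do_path_lookup() below for the real thing) is simply:
	 *
	 *	err = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
	 *	if (err == -ECHILD)
	 *		err = path_lookupat(dfd, name, flags, nd);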
	 */
	err = path_init(dfd, name, flags | LOOKUP_PARENT, nd, &base);

	if (unlikely(err))
		return err;

	current->total_link_count = 0;
	err = link_path_walk(name, nd);

	if (!err && !(flags & LOOKUP_PARENT)) {
		err = lookup_last(nd, &path);
		while (err > 0) {
			void *cookie;
			struct path link = path;
			nd->flags |= LOOKUP_PARENT;
			err = follow_link(&link, nd, &cookie);
			if (!err)
				err = lookup_last(nd, &path);
			put_link(nd, &link, cookie);
		}
	}

	if (!err)
		err = complete_walk(nd);

	if (!err && nd->flags & LOOKUP_DIRECTORY) {
		if (!nd->inode->i_op->lookup) {
			path_put(&nd->path);
			err = -ENOTDIR;
		}
	}

	if (base)
		fput(base);

	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		path_put(&nd->root);
		nd->root.mnt = NULL;
	}
	return err;
}

static int do_path_lookup(int dfd, const char *name,
				unsigned int flags, struct nameidata *nd)
{
	int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
	if (unlikely(retval == -ECHILD))
		retval = path_lookupat(dfd, name, flags, nd);
	if (unlikely(retval == -ESTALE))
		retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);

	if (likely(!retval)) {
		if (unlikely(!audit_dummy_context())) {
			if (nd->path.dentry && nd->inode)
				audit_inode(name, nd->path.dentry);
		}
	}
	return retval;
}

int kern_path_parent(const char *name, struct nameidata *nd)
{
	return do_path_lookup(AT_FDCWD, name, LOOKUP_PARENT, nd);
}

int kern_path(const char *name, unsigned int flags, struct path *path)
{
	struct nameidata nd;
	int res = do_path_lookup(AT_FDCWD, name, flags, &nd);
	if (!res)
		*path = nd.path;
	return res;
}

/**
 * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair
 * @dentry: pointer to dentry of the base directory
 * @mnt: pointer to vfs mount of the base directory
 * @name: pointer to file name
 * @flags: lookup flags
 * @nd: pointer to nameidata
 */
int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
		    const char *name, unsigned int flags,
		    struct nameidata *nd)
{
	nd->root.dentry = dentry;
	nd->root.mnt = mnt;
	/* the first argument of do_path_lookup() is ignored with LOOKUP_ROOT */
	return do_path_lookup(AT_FDCWD, name, flags | LOOKUP_ROOT, nd);
}

static struct dentry *__lookup_hash(struct qstr *name,
		struct dentry *base, struct nameidata *nd)
{
	struct inode *inode = base->d_inode;
	struct dentry *dentry;
	int err;

	err = exec_permission(inode, 0);
	if (err)
		return ERR_PTR(err);

	/*
	 * Don't bother with __d_lookup: callers are for creat as
	 * well as unlink, so a lot of the time it would cost
	 * a double lookup.
	 */
	dentry = d_lookup(base, name);

	if (dentry && (dentry->d_flags & DCACHE_OP_REVALIDATE))
		dentry = do_revalidate(dentry, nd);

	if (!dentry)
		dentry = d_alloc_and_lookup(base, name, nd);

	return dentry;
}

/*
 * Restricted form of lookup. Doesn't follow links, single-component only,
 * needs parent already locked. Doesn't follow mounts.
 * SMP-safe.
 */
static struct dentry *lookup_hash(struct nameidata *nd)
{
	return __lookup_hash(&nd->last, nd->path.dentry, nd);
}

/**
 * lookup_one_len - filesystem helper to lookup single pathname component
 * @name:	pathname component to lookup
 * @base:	base directory to lookup from
 * @len:	maximum length @len should be interpreted to
 *
 * Note that this routine is purely a helper for filesystem usage and should
 * not be called by generic code. Also note that by using this function the
 * nameidata argument is passed to the filesystem methods and a filesystem
 * using this helper needs to be prepared for that.
 */
struct dentry *lookup_one_len(const char *name, struct dentry *base, int len)
{
	struct qstr this;
	unsigned long hash;
	unsigned int c;

	WARN_ON_ONCE(!mutex_is_locked(&base->d_inode->i_mutex));

	this.name = name;
	this.len = len;
	if (!len)
		return ERR_PTR(-EACCES);

	hash = init_name_hash();
	while (len--) {
		c = *(const unsigned char *)name++;
		if (c == '/' || c == '\0')
			return ERR_PTR(-EACCES);
		hash = partial_name_hash(c, hash);
	}
	this.hash = end_name_hash(hash);
	/*
	 * See if the low-level filesystem might want
	 * to use its own hash..
	 */
	if (base->d_flags & DCACHE_OP_HASH) {
		int err = base->d_op->d_hash(base, base->d_inode, &this);
		if (err < 0)
			return ERR_PTR(err);
	}

	return __lookup_hash(&this, base, NULL);
}

int user_path_at(int dfd, const char __user *name, unsigned flags,
		 struct path *path)
{
	struct nameidata nd;
	char *tmp = getname_flags(name, flags);
	int err = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {

		BUG_ON(flags & LOOKUP_PARENT);

		err = do_path_lookup(dfd, tmp, flags, &nd);
		putname(tmp);
		if (!err)
			*path = nd.path;
	}
	return err;
}

static int user_path_parent(int dfd, const char __user *path,
			struct nameidata *nd, char **name)
{
	char *s = getname(path);
	int error;

	if (IS_ERR(s))
		return PTR_ERR(s);

	error = do_path_lookup(dfd, s, LOOKUP_PARENT, nd);
	if (error)
		putname(s);
	else
		*name = s;

	return error;
}

/*
 * It's inline, so penalty for filesystems that don't use sticky bit is
 * minimal.
 */
static inline int check_sticky(struct inode *dir, struct inode *inode)
{
	uid_t fsuid = current_fsuid();

	if (!(dir->i_mode & S_ISVTX))
		return 0;
	if (current_user_ns() != inode_userns(inode))
		goto other_userns;
	if (inode->i_uid == fsuid)
		return 0;
	if (dir->i_uid == fsuid)
		return 0;

other_userns:
	return !ns_capable(inode_userns(inode), CAP_FOWNER);
}

/*
 *  Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do anything with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir, struct dentry *victim, int isdir)
{
	int error;

	if (!victim->d_inode)
		return -ENOENT;

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(victim, dir);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;
	if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
	    IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
		return -EPERM;
	if (isdir) {
		if (!S_ISDIR(victim->d_inode->i_mode))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (S_ISDIR(victim->d_inode->i_mode))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*  Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		return NULL;
	}

	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	p = d_ancestor(p2, p1);
	if (p) {
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
		return p;
	}

	mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD);
	return NULL;
}

void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	mutex_unlock(&p1->d_inode->i_mutex);
	if (p1 != p2) {
		mutex_unlock(&p2->d_inode->i_mutex);
		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
	}
}

int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, nd);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	/* O_PATH? */
	if (!acc_mode)
		return 0;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (path->mnt->mnt_flags & MNT_NODEV)
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	/*
	 * Ensure there are no outstanding leases on the file.
	 */
	return break_lease(inode, flag);
}

static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(inode);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}

/*
 * Note that while the flag value (low two bits) for sys_open means:
 *	00 - read-only
 *	01 - write-only
 *	10 - read-write
 *	11 - special
 * it is changed into
 *	00 - no permissions needed
 *	01 - read-permission
 *	10 - write-permission
 *	11 - read-write
 * for the internal routines (ie open_namei()/follow_link() etc)
 * This is more logical, and also allows the 00 "no perm needed"
 * to be used for symlinks (where the permissions are checked
 * later).
 *
 */
static inline int open_to_namei_flags(int flag)
{
	if ((flag+1) & O_ACCMODE)
		flag++;
	return flag;
}

/*
 * Handle the last step of open()
 */
static struct file *do_last(struct nameidata *nd, struct path *path,
			    const struct open_flags *op, const char *pathname)
{
	struct dentry *dir = nd->path.dentry;
	struct dentry *dentry;
	int open_flag = op->open_flag;
	int will_truncate = open_flag & O_TRUNC;
	int want_write = 0;
	int acc_mode = op->acc_mode;
	struct file *filp;
	int error;

	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;

	switch (nd->last_type) {
	case LAST_DOTDOT:
	case LAST_DOT:
		error = handle_dots(nd, nd->last_type);
		if (error)
			return ERR_PTR(error);
		/* fallthrough */
	case LAST_ROOT:
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(error);
		audit_inode(pathname, nd->path.dentry);
		if (open_flag & O_CREAT) {
			error = -EISDIR;
			goto exit;
		}
		goto ok;
	case LAST_BIND:
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(error);
		audit_inode(pathname, dir);
		goto ok;
	}

	if (!(open_flag & O_CREAT)) {
		int symlink_ok = 0;
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
			symlink_ok = 1;
		/* we _can_ be in RCU mode here */
		error = walk_component(nd, path, &nd->last, LAST_NORM,
					!symlink_ok);
		if (error < 0)
			return ERR_PTR(error);
		if (error) /* symlink */
			return NULL;
		/* sayonara */
		error = complete_walk(nd);
		if (error)
			return ERR_PTR(-ECHILD);

		error = -ENOTDIR;
		if (nd->flags & LOOKUP_DIRECTORY) {
			if (!nd->inode->i_op->lookup)
				goto exit;
		}
		audit_inode(pathname, nd->path.dentry);
		goto ok;
	}

	/* create side of things */
	error = complete_walk(nd);
	if (error)
		return ERR_PTR(error);

	audit_inode(pathname, dir);
	error = -EISDIR;
	/* trailing slashes? */
	if (nd->last.name[nd->last.len])
		goto exit;

	mutex_lock(&dir->d_inode->i_mutex);

	dentry = lookup_hash(nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry)) {
		mutex_unlock(&dir->d_inode->i_mutex);
		goto exit;
	}

	path->dentry = dentry;
	path->mnt = nd->path.mnt;

	/* Negative dentry, just create the file */
	if (!dentry->d_inode) {
		int mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in nameidata_to_filp().
		 */
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit_mutex_unlock;
		want_write = 1;
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = 0;
		acc_mode = MAY_OPEN;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto exit_mutex_unlock;
		error = vfs_create(dir->d_inode, dentry, mode, nd);
		if (error)
			goto exit_mutex_unlock;
		mutex_unlock(&dir->d_inode->i_mutex);
		dput(nd->path.dentry);
		nd->path.dentry = dentry;
		goto common;
	}

	/*
	 * It already exists.
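	 * With O_CREAT|O_EXCL that is an error (-EEXIST, checked first
	 * below); otherwise we fall through to the plain-open path.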
	 */
	mutex_unlock(&dir->d_inode->i_mutex);
	audit_inode(pathname, path->dentry);
	error = -EEXIST;
	if (open_flag & O_EXCL)
		goto exit_dput;

	error = follow_managed(path, nd->flags);
	if (error < 0)
		goto exit_dput;

	error = -ENOENT;
	if (!path->dentry->d_inode)
		goto exit_dput;

	if (path->dentry->d_inode->i_op->follow_link)
		return NULL;

	path_to_nameidata(path, nd);
	nd->inode = path->dentry->d_inode;
	error = -EISDIR;
	if (S_ISDIR(nd->inode->i_mode))
		goto exit;
ok:
	if (!S_ISREG(nd->inode->i_mode))
		will_truncate = 0;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto exit;
		want_write = 1;
	}
common:
	error = may_open(&nd->path, acc_mode, open_flag);
	if (error)
		goto exit;
	filp = nameidata_to_filp(nd);
	if (!IS_ERR(filp)) {
		error = ima_file_check(filp, op->acc_mode);
		if (error) {
			fput(filp);
			filp = ERR_PTR(error);
		}
	}
	if (!IS_ERR(filp)) {
		if (will_truncate) {
			error = handle_truncate(filp);
			if (error) {
				fput(filp);
				filp = ERR_PTR(error);
			}
		}
	}
out:
	if (want_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&nd->path);
	return filp;

exit_mutex_unlock:
	mutex_unlock(&dir->d_inode->i_mutex);
exit_dput:
	path_put_conditional(path, nd);
exit:
	filp = ERR_PTR(error);
	goto out;
}

static struct file *path_openat(int dfd, const char *pathname,
		struct nameidata *nd, const struct open_flags *op, int flags)
{
	struct file *base = NULL;
	struct file *filp;
	struct path path;
	int error;

	filp = get_empty_filp();
	if (!filp)
		return ERR_PTR(-ENFILE);

	filp->f_flags = op->open_flag;
	nd->intent.open.file = filp;
	nd->intent.open.flags = open_to_namei_flags(op->open_flag);
	nd->intent.open.create_mode = op->mode;

	error = path_init(dfd, pathname, flags | LOOKUP_PARENT, nd, &base);
	if (unlikely(error))
		goto out_filp;

	current->total_link_count = 0;
	error = link_path_walk(pathname, nd);
	if (unlikely(error))
		goto out_filp;

	filp = do_last(nd, &path, op, pathname);
	while (unlikely(!filp)) { /* trailing symlink */
		struct path link = path;
		void *cookie;
		if (!(nd->flags & LOOKUP_FOLLOW)) {
			path_put_conditional(&path, nd);
			path_put(&nd->path);
			filp = ERR_PTR(-ELOOP);
			break;
		}
		nd->flags |= LOOKUP_PARENT;
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		error = follow_link(&link, nd, &cookie);
		if (unlikely(error))
			filp = ERR_PTR(error);
		else
			filp = do_last(nd, &path, op, pathname);
		put_link(nd, &link, cookie);
	}
out:
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT))
		path_put(&nd->root);
	if (base)
		fput(base);
	release_open_intent(nd);
	return filp;

out_filp:
	filp = ERR_PTR(error);
	goto out;
}

struct file *do_filp_open(int dfd, const char *pathname,
		const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *filp;

	filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(dfd, pathname, &nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
	return filp;
}

struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op, int flags)
{
	struct nameidata nd;
	struct file *file;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	flags |= LOOKUP_ROOT;

	if (dentry->d_inode->i_op->follow_link && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	file = path_openat(-1, name, &nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(-1, name, &nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(-1, name, &nd, op, flags | LOOKUP_REVAL);
	return file;
}

/**
 * lookup_create - lookup a dentry, creating it if it doesn't exist
 * @nd: nameidata info
 * @is_dir: directory flag
 *
 * Simple function to lookup and return a dentry and create it
 * if it doesn't exist. Is SMP-safe.
 *
 * Returns with nd->path.dentry->d_inode->i_mutex locked.
 */
struct dentry *lookup_create(struct nameidata *nd, int is_dir)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);

	mutex_lock_nested(&nd->path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (nd->last_type != LAST_NORM)
		goto fail;
	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	nd->intent.open.flags = O_EXCL;

	/*
	 * Do the final lookup.
	 */
	dentry = lookup_hash(nd);
	if (IS_ERR(dentry))
		goto fail;

	if (dentry->d_inode)
		goto eexist;
	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && nd->last.name[nd->last.len])) {
		dput(dentry);
		dentry = ERR_PTR(-ENOENT);
	}
	return dentry;
eexist:
	dput(dentry);
	dentry = ERR_PTR(-EEXIST);
fail:
	return dentry;
}
EXPORT_SYMBOL_GPL(lookup_create);

int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if ((S_ISCHR(mode) || S_ISBLK(mode)) &&
	    !ns_capable(inode_userns(dir), CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

static int may_mknod(mode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
		unsigned, dev)
{
	int error;
	char *tmp;
	struct dentry *dentry;
	struct nameidata nd;

	if (S_ISDIR(mode))
		return -EPERM;

	error = user_path_parent(dfd, filename, &nd, &tmp);
	if (error)
		return error;

	dentry = lookup_create(&nd, 0);
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_unlock;
	}
	if (!IS_POSIXACL(nd.path.dentry->d_inode))
		mode &= ~current_umask();
	error = may_mknod(mode);
	if (error)
		goto out_dput;
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mknod(&nd.path, dentry, mode, dev);
	if (error)
		goto out_drop_write;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(nd.path.dentry->d_inode, dentry, mode, &nd);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(nd.path.dentry->d_inode, dentry, mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
			break;
	}
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
	putname(tmp);

	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, int, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}

SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
{
	int error = 0;
	char *tmp;
	struct dentry *dentry;
	struct nameidata nd;

	error = user_path_parent(dfd, pathname, &nd, &tmp);
	if (error)
		goto out_err;

	dentry = lookup_create(&nd, 1);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	if (!IS_POSIXACL(nd.path.dentry->d_inode))
		mode &= ~current_umask();
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_mkdir(&nd.path, dentry, mode);
	if (error)
		goto out_drop_write;
	error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
	putname(tmp);
out_err:
	return error;
}

SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/*
 * The dentry_unhash() helper will try to drop the dentry early: we
 * should have a usage count of 2 if we're the only user of this
 * dentry, and if that is true (possibly after pruning the dcache),
 * then we drop the dentry now.
 *
 * A low-level filesystem can, if it chooses, legally
 * do a
 *
 *	if (!d_unhashed(dentry))
 *		return -EBUSY;
 *
 * if it cannot handle the case of removing a directory
 * that is still in use by something else..
 */
void dentry_unhash(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	spin_lock(&dentry->d_lock);
	if (dentry->d_count == 1)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}

int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	mutex_lock(&dentry->d_inode->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(dentry))
		goto out;

	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;

	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;

	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);

out:
	mutex_unlock(&dentry->d_inode->i_mutex);
	if (!error)
		d_delete(dentry);
	return error;
}

static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	char *name;
	struct dentry *dentry;
	struct nameidata nd;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	switch (nd.last_type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto exit3;
	error = security_path_rmdir(&nd.path, dentry);
	if (error)
		goto exit4;
	error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
exit4:
	mnt_drop_write(nd.path.mnt);
exit3:
	dput(dentry);
exit2:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
exit1:
	path_put(&nd.path);
	putname(name);
	return error;
}

SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
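			/*
			 * Security hook allowed it; ask the filesystem to
			 * do the actual unlink and, on success, forbid
			 * future mounts on the now-doomed dentry.
			 */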
			error = dir->i_op->unlink(dir, dentry);
			if (!error)
				dont_mount(dentry);
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(dentry->d_inode);
		d_delete(dentry);
	}

	return error;
}

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex. Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	char *name;
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;

	error = user_path_parent(dfd, pathname, &nd, &name);
	if (error)
		return error;

	error = -EISDIR;
	if (nd.last_type != LAST_NORM)
		goto exit1;

	nd.flags &= ~LOOKUP_PARENT;

	mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (nd.last.name[nd.last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (!inode)
			goto slashes;
		ihold(inode);
		error = mnt_want_write(nd.path.mnt);
		if (error)
			goto exit2;
		error = security_path_unlink(&nd.path, dentry);
		if (error)
			goto exit3;
		error = vfs_unlink(nd.path.dentry->d_inode, dentry);
exit3:
		mnt_drop_write(nd.path.mnt);
exit2:
		dput(dentry);
	}
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	if (inode)
		iput(inode);	/* truncate the inode here */
exit1:
	path_put(&nd.path);
	putname(name);
	return error;

slashes:
	error = !dentry->d_inode ? -ENOENT :
		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit2;
}

SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
	if ((flag & ~AT_REMOVEDIR) != 0)
		return -EINVAL;

	if (flag & AT_REMOVEDIR)
		return do_rmdir(dfd, pathname);

	return do_unlinkat(dfd, pathname);
}

SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}

int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}

SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	char *from;
	char *to;
	struct dentry *dentry;
	struct nameidata nd;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);

	error = user_path_parent(newdfd, newname, &nd, &to);
	if (error)
		goto out_putname;

	dentry = lookup_create(&nd, 0);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_unlock;

	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_symlink(&nd.path, dentry, from);
	if (error)
		goto out_drop_write;
	error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
	path_put(&nd.path);
	putname(to);
out_putname:
	putname(from);
	return error;
}

SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}

int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	error = may_create(dir, new_dentry);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A link to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;
	if (!dir->i_op->link)
		return -EPERM;
	if (S_ISDIR(inode->i_mode))
		return -EPERM;

	error = security_inode_link(old_dentry, dir, new_dentry);
	if (error)
		return error;

	mutex_lock(&inode->i_mutex);
	/* Make sure we don't allow creating hardlink to an unlinked file */
	if (inode->i_nlink == 0)
		error = -ENOENT;
	else
		error = dir->i_op->link(old_dentry, dir, new_dentry);
	mutex_unlock(&inode->i_mutex);
	if (!error)
		fsnotify_link(dir, inode, new_dentry);
	return error;
}

/*
 * Hardlinks are often used in delicate situations. We avoid
 * security-related surprises by not following symlinks on the
 * newname. --KAB
 *
 * We don't follow them on the oldname either to be compatible
 * with linux 2.0, and to avoid hard-linking to directories
 * and other special files. --ADM
 */
SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname, int, flags)
{
	struct dentry *new_dentry;
	struct nameidata nd;
	struct path old_path;
	int how = 0;
	int error;
	char *to;

	if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0)
		return -EINVAL;
	/*
	 * To use null names we require CAP_DAC_READ_SEARCH.
	 * This ensures that not everyone will be able to create
	 * a hard link using the passed file descriptor.
	 */
	if (flags & AT_EMPTY_PATH) {
		if (!capable(CAP_DAC_READ_SEARCH))
			return -ENOENT;
		how = LOOKUP_EMPTY;
	}

	if (flags & AT_SYMLINK_FOLLOW)
		how |= LOOKUP_FOLLOW;

	error = user_path_at(olddfd, oldname, how, &old_path);
	if (error)
		return error;

	error = user_path_parent(newdfd, newname, &nd, &to);
	if (error)
		goto out;
	error = -EXDEV;
	if (old_path.mnt != nd.path.mnt)
		goto out_release;
	new_dentry = lookup_create(&nd, 0);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto out_unlock;
	error = mnt_want_write(nd.path.mnt);
	if (error)
		goto out_dput;
	error = security_path_link(old_path.dentry, &nd.path, new_dentry);
	if (error)
		goto out_drop_write;
	error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
out_drop_write:
	mnt_drop_write(nd.path.mnt);
out_dput:
	dput(new_dentry);
out_unlock:
	mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
out_release:
	path_put(&nd.path);
	putname(to);
out:
	path_put(&old_path);

	return error;
}

SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname)
{
	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}

/*
 * The worst of all namespace operations - renaming directory. "Perverted"
 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
 * Problems:
 *	a) we can get into loop creation. Check is done in is_subdir().
 *	b) race potential - two innocent renames can create a loop together.
 *	   That's where 4.4 screws up. Current fix: serialization on
 *	   sb->s_vfs_rename_mutex. We might be more accurate, but that's another
 *	   story.
 *	c) we have to lock _three_ objects - parents and victim (if it exists).
 *	   And that - after we got ->i_mutex on parents (until then we don't know
 *	   whether the target exists). Solution: try to be smart with locking
 *	   order for inodes. We rely on the fact that tree topology may change
 *	   only under ->s_vfs_rename_mutex _and_ that parent of the object we
 *	   move will be locked. Thus we can rank directories by the tree
 *	   (ancestors first) and rank all non-directories after them.
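 *	   (This ranking is exactly what lock_rename() above encodes:
 *	   ->s_vfs_rename_mutex first, then the ancestor's i_mutex with
 *	   I_MUTEX_PARENT before the descendant's with I_MUTEX_CHILD.)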
 *	   That works since everybody except rename does "lock parent, lookup,
 *	   lock child" and rename is under ->s_vfs_rename_mutex.
 *	   HOWEVER, it relies on the assumption that any object with ->lookup()
 *	   has no more than 1 dentry. If "hybrid" objects will ever appear,
 *	   we'd better make sure that there's no link(2) for them.
 *	d) conversion from fhandle to dentry may come in the wrong moment - when
 *	   we are removing the target. Solution: we will have to grab ->i_mutex
 *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
 *	   ->i_mutex on parents, which works but leads to some truly excessive
 *	   locking].
 */
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = 0;
	struct inode *target = new_dentry->d_inode;

	/*
	 * If we are going to change the parent - check write permissions,
	 * we'll need to flip '..'.
	 */
	if (new_dir != old_dir) {
		error = inode_permission(old_dentry->d_inode, MAY_WRITE);
		if (error)
			return error;
	}

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	if (target)
		mutex_lock(&target->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		goto out;

	if (target)
		shrink_dcache_parent(new_dentry);
	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;

	if (target) {
		target->i_flags |= S_DEAD;
		dont_mount(new_dentry);
	}
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	if (!error)
		if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
			d_move(old_dentry, new_dentry);
	return error;
}

static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *target = new_dentry->d_inode;
	int error;

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	dget(new_dentry);
	if (target)
		mutex_lock(&target->i_mutex);

	error = -EBUSY;
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		goto out;

	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		goto out;

	if (target)
		dont_mount(new_dentry);
	if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE))
		d_move(old_dentry, new_dentry);
out:
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	return error;
}

int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry)
{
	int error;
	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
	const unsigned char *old_name;

	if (old_dentry->d_inode == new_dentry->d_inode)
		return 0;

	error = may_delete(old_dir, old_dentry, is_dir);
	if (error)
		return error;

	if (!new_dentry->d_inode)
		error = may_create(new_dir, new_dentry);
	else
		error = may_delete(new_dir, new_dentry, is_dir);
	if (error)
		return error;

	if (!old_dir->i_op->rename)
		return -EPERM;

	old_name = fsnotify_oldname_init(old_dentry->d_name.name);

	if (is_dir)
		error = vfs_rename_dir(old_dir, old_dentry, new_dir, new_dentry);
	else
		error = vfs_rename_other(old_dir, old_dentry, new_dir, new_dentry);
	if (!error)
		fsnotify_move(old_dir, new_dir, old_name, is_dir,
			      new_dentry->d_inode, old_dentry);
	fsnotify_oldname_free(old_name);

	return error;
}

SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	struct dentry *old_dir, *new_dir;
	struct dentry *old_dentry, *new_dentry;
	struct dentry *trap;
	struct nameidata oldnd, newnd;
	char *from;
	char *to;
	int error;

	error = user_path_parent(olddfd, oldname, &oldnd, &from);
	if (error)
		goto exit;

	error = user_path_parent(newdfd, newname, &newnd, &to);
	if (error)
		goto exit1;

	error = -EXDEV;
	if (oldnd.path.mnt != newnd.path.mnt)
		goto exit2;

	old_dir = oldnd.path.dentry;
	error = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		goto exit2;

	new_dir = newnd.path.dentry;
	if (newnd.last_type != LAST_NORM)
		goto exit2;

	oldnd.flags &= ~LOOKUP_PARENT;
	newnd.flags &= ~LOOKUP_PARENT;
	newnd.flags |= LOOKUP_RENAME_TARGET;

	trap = lock_rename(new_dir, old_dir);

	old_dentry = lookup_hash(&oldnd);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;
	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		error = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			goto exit4;
		if (newnd.last.name[newnd.last.len])
			goto exit4;
	}
	/* source should not be ancestor of target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit4;
	new_dentry = lookup_hash(&newnd);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	/* target should not be an ancestor of source */
	error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;

	error = mnt_want_write(oldnd.path.mnt);
	if (error)
		goto exit5;
	error = security_path_rename(&oldnd.path, old_dentry,
				     &newnd.path, new_dentry);
	if (error)
		goto exit6;
	error = vfs_rename(old_dir->d_inode, old_dentry,
			   new_dir->d_inode, new_dentry);
exit6:
	mnt_drop_write(oldnd.path.mnt);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
exit2:
	path_put(&newnd.path);
	putname(to);
exit1:
	path_put(&oldnd.path);
	putname(from);
exit:
	return error;
}

SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
}

int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
	int len;

	len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}

/*
 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
 * have ->follow_link() touching nd only in nd_set_link(). Using (or not
 * using) it for any given inode is up to filesystem.
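 *
 * A sketch of the usual wiring (foo_* are hypothetical; compare
 * page_symlink_inode_operations at the bottom of this file):
 *
 *	const struct inode_operations foo_symlink_iops = {
 *		.readlink	= generic_readlink,
 *		.follow_link	= foo_follow_link,  -- must call nd_set_link()
 *		.put_link	= foo_put_link,
 *	};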
 */
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct nameidata nd;
	void *cookie;
	int res;

	nd.depth = 0;
	cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
	if (IS_ERR(cookie))
		return PTR_ERR(cookie);

	res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
	if (dentry->d_inode->i_op->put_link)
		dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
	return res;
}

int vfs_follow_link(struct nameidata *nd, const char *link)
{
	return __vfs_follow_link(nd, link);
}

/* get the link contents into pagecache */
static char *page_getlink(struct dentry *dentry, struct page **ppage)
{
	char *kaddr;
	struct page *page;
	struct address_space *mapping = dentry->d_inode->i_mapping;
	page = read_mapping_page(mapping, 0, NULL);
	if (IS_ERR(page))
		return (char *)page;
	*ppage = page;
	kaddr = kmap(page);
	nd_terminate_link(kaddr, dentry->d_inode->i_size, PAGE_SIZE - 1);
	return kaddr;
}

int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct page *page = NULL;
	char *s = page_getlink(dentry, &page);
	int res = vfs_readlink(dentry, buffer, buflen, s);
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
	return res;
}

void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	nd_set_link(nd, page_getlink(dentry, &page));
	return page;
}

void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	struct page *page = cookie;

	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}

/*
 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	char *kaddr;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;

retry:
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;

	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr, KM_USER0);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	if (err < len-1)
		goto retry;

	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}

int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!(mapping_gfp_mask(inode->i_mapping) & __GFP_FS));
}

const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
};

EXPORT_SYMBOL(user_path_at);
EXPORT_SYMBOL(follow_down_one);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(__page_symlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(kern_path_parent);
EXPORT_SYMBOL(kern_path);
EXPORT_SYMBOL(vfs_path_lookup);
EXPORT_SYMBOL(inode_permission);
EXPORT_SYMBOL(file_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_symlink);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
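
/*
 * Editor's usage sketch (not part of the original file): resolving a
 * path from kernel context with the exported kern_path() above and
 * dropping the reference it returns:
 *
 *	struct path p;
 *	int err = kern_path("/some/path", LOOKUP_FOLLOW, &p);
 *	if (!err) {
 *		... use p.dentry / p.mnt ...
 *		path_put(&p);
 *	}
 */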