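/*
 * BCSP (BlueCore Serial Protocol) support for the Bluetooth HCI UART
 * driver: outgoing packets are queued on reliable or unreliable
 * channels, SLIP-framed with an optional CRC, tracked in a send window
 * and retransmitted on timeout; received bytes are fed through a small
 * state machine that unescapes, verifies and reassembles them.
 */
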
static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct bcsp_struct *bcsp = hu->priv;

	if (skb->len > 0xFFF) {
		BT_ERR("Packet too long");
		kfree_skb(skb);
		return 0;
	}

	switch (bt_cb(skb)->pkt_type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&bcsp->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&bcsp->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type");
		kfree_skb(skb);
		break;
	}

	return 0;
}

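/*
 * Build a SLIP-framed BCSP packet around @data: 0xc0 delimiter,
 * 4-byte header, escaped payload, optional 16-bit CRC, 0xc0 delimiter.
 */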
static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
		int len, int pkt_type)
{
	struct sk_buff *nskb;
	u8 hdr[4], chan;
	u16 BCSP_CRC_INIT(bcsp_txmsg_crc);
	int rel, i;

	switch (pkt_type) {
	case HCI_ACLDATA_PKT:
		chan = 6;	/* BCSP ACL channel */
		rel = 1;	/* reliable channel */
		break;
	case HCI_COMMAND_PKT:
		chan = 5;	/* BCSP cmd/evt channel */
		rel = 1;	/* reliable channel */
		break;
	case HCI_SCODATA_PKT:
		chan = 7;	/* BCSP SCO channel */
		rel = 0;	/* unreliable channel */
		break;
	case BCSP_LE_PKT:
		chan = 1;	/* BCSP LE channel */
		rel = 0;	/* unreliable channel */
		break;
	case BCSP_ACK_PKT:
		chan = 0;	/* BCSP internal channel */
		rel = 0;	/* unreliable channel */
		break;
	default:
		BT_ERR("Unknown packet type");
		return NULL;
	}

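	/*
	 * With hciextn enabled, vendor-specific HCI commands whose first
	 * payload byte looks like 0xCx are rerouted: the low nibble of that
	 * descriptor selects the BCSP channel, and the HCI header plus the
	 * descriptor byte are stripped from the payload.
	 */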
	if (hciextn && chan == 5) {
		struct hci_command_hdr *hdr = (struct hci_command_hdr *) data;

		if (hci_opcode_ogf(__le16_to_cpu(hdr->opcode)) == OGF_VENDOR_CMD) {
			u8 desc = *(data + HCI_COMMAND_HDR_SIZE);
			if ((desc & 0xf0) == 0xc0) {
				data += HCI_COMMAND_HDR_SIZE + 1;
				len  -= HCI_COMMAND_HDR_SIZE + 1;
				chan = desc & 0x0f;
			}
		}
	}

	/* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2
	   (because bytes 0xc0 and 0xdb are escaped, worst case is
	   when the packet is all made of 0xc0 and 0xdb :) )
	   + 2 (0xc0 delimiters at start and end). */

	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	bt_cb(nskb)->pkt_type = pkt_type;

	bcsp_slip_msgdelim(nskb);

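	/*
	 * 4-byte BCSP header:
	 *   hdr[0]  bit 7: reliable flag, bit 6: CRC present,
	 *           bits 5..3: ack number, bits 2..0: sequence number
	 *   hdr[1]  low nibble: channel id, high nibble: length bits 3..0
	 *   hdr[2]  length bits 11..4
	 *   hdr[3]  one's complement of the sum of the first three bytes
	 */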
	hdr[0] = bcsp->rxseq_txack << 3;
	bcsp->txack_req = 0;
	BT_DBG("We request packet no %u to card", bcsp->rxseq_txack);

	if (rel) {
		hdr[0] |= 0x80 + bcsp->msgq_txseq;
		BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
		bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07;
	}

	if (bcsp->use_crc)
		hdr[0] |= 0x40;

	hdr[1] = ((len << 4) & 0xff) | chan;
	hdr[2] = len >> 4;
	hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]);

	/* Put BCSP header */
	for (i = 0; i < 4; i++) {
		bcsp_slip_one_byte(nskb, hdr[i]);

		if (bcsp->use_crc)
			bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]);
	}

	/* Put payload */
	for (i = 0; i < len; i++) {
		bcsp_slip_one_byte(nskb, data[i]);

		if (bcsp->use_crc)
			bcsp_crc_update(&bcsp_txmsg_crc, data[i]);
	}

	/* Put CRC */
	if (bcsp->use_crc) {
		bcsp_txmsg_crc = bcsp_crc_reverse(bcsp_txmsg_crc);
		bcsp_slip_one_byte(nskb, (u8) ((bcsp_txmsg_crc >> 8) & 0x00ff));
		bcsp_slip_one_byte(nskb, (u8) (bcsp_txmsg_crc & 0x00ff));
	}

	bcsp_slip_msgdelim(nskb);
	return nskb;
}

/* This is a rewrite of pkt_avail in ABCSP */
static struct sk_buff *bcsp_dequeue(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp = hu->priv;
	unsigned long flags;
	struct sk_buff *skb;
	
	/* First of all, check for unreliable messages in the queue,
	   since they have priority */

	if ((skb = skb_dequeue(&bcsp->unrel)) != NULL) {
		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		} else {
			skb_queue_head(&bcsp->unrel, skb);
			BT_ERR("Could not dequeue pkt because alloc_skb failed");
		}
	}

	/* Now, try to send a reliable pkt. We can only send a
	   reliable packet if the number of packets sent but not yet ack'ed
	   is less than the window size */

	spin_lock_irqsave(&bcsp->unack.lock, flags);

	if (bcsp->unack.qlen < BCSP_TXWINSIZE && (skb = skb_dequeue(&bcsp->rel)) != NULL) {
		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, bt_cb(skb)->pkt_type);
		if (nskb) {
			__skb_queue_tail(&bcsp->unack, skb);
			mod_timer(&bcsp->tbcsp, jiffies + HZ / 4);
			spin_unlock_irqrestore(&bcsp->unack.lock, flags);
			return nskb;
		} else {
			skb_queue_head(&bcsp->rel, skb);
			BT_ERR("Could not dequeue pkt because alloc_skb failed");
		}
	}

	spin_unlock_irqrestore(&bcsp->unack.lock, flags);


	/* We could not send a reliable packet, either because there are
	   none or because there are too many unack'ed pkts. Did we receive
	   any packets we have not acknowledged yet ? */

	if (bcsp->txack_req) {
		/* if so, craft an empty ACK pkt and send it on BCSP unreliable
		   channel 0 */
		struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT);
		return nskb;
	}

	/* We have nothing to send */
	return NULL;
}

static int bcsp_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

/* Remove ack'ed packets */
static void bcsp_pkt_cull(struct bcsp_struct *bcsp)
{
	unsigned long flags;
	struct sk_buff *skb;
	int i, pkts_to_be_removed;
	u8 seqno;

	spin_lock_irqsave(&bcsp->unack.lock, flags);

	pkts_to_be_removed = bcsp->unack.qlen;
	seqno = bcsp->msgq_txseq;

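	/*
	 * Walk back from the next TX sequence number to the peer's ack to
	 * work out how many of the oldest unacked packets it has covered.
	 */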
	while (pkts_to_be_removed) {
		if (bcsp->rxack == seqno)
			break;
		pkts_to_be_removed--;
		seqno = (seqno - 1) & 0x07;
	}

	if (bcsp->rxack != seqno)
		BT_ERR("Peer acked invalid packet");

	BT_DBG("Removing %u pkts out of %u, up to seqno %u",
	       pkts_to_be_removed, bcsp->unack.qlen, (seqno - 1) & 0x07);

	for (i = 0, skb = ((struct sk_buff *) &bcsp->unack)->next; i < pkts_to_be_removed
			&& skb != (struct sk_buff *) &bcsp->unack; i++) {
		struct sk_buff *nskb;

		nskb = skb->next;
		__skb_unlink(skb, &bcsp->unack);
		kfree_skb(skb);
		skb = nskb;
	}
	if (bcsp->unack.qlen == 0)
		del_timer(&bcsp->tbcsp);
	spin_unlock_irqrestore(&bcsp->unack.lock, flags);

	if (i != pkts_to_be_removed)
		BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed);
}

/* Handle BCSP link-establishment packets. When we detect a "sync"
   packet, a sign that the BT module has reset, we only log it for
   now and take no further action. */
static void bcsp_handle_le_pkt(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp = hu->priv;
	u8 conf_pkt[4]     = { 0xad, 0xef, 0xac, 0xed };
	u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 };
	u8 sync_pkt[4]     = { 0xda, 0xdc, 0xed, 0xed };

	/* spot "conf" pkts and reply with a "conf rsp" pkt */
	if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
			!memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) {
		struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC);

		BT_DBG("Found a LE conf pkt");
		if (!nskb)
			return;
		memcpy(skb_put(nskb, 4), conf_rsp_pkt, 4);
		bt_cb(nskb)->pkt_type = BCSP_LE_PKT;

		skb_queue_head(&bcsp->unrel, nskb);
		hci_uart_tx_wakeup(hu);
	}
	/* Spot "sync" pkts. If we find one...disaster! */
	else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 &&
			!memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) {
		BT_ERR("Found a LE sync pkt, card has reset");
	}
}

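/*
 * Undo SLIP escaping for one received byte: the sequence 0xdb 0xdc
 * decodes to 0xc0 and 0xdb 0xdd to 0xdb; the running CRC is updated
 * for packets that carry one.
 */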
static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte)
{
	const u8 c0 = 0xc0, db = 0xdb;

	switch (bcsp->rx_esc_state) {
	case BCSP_ESCSTATE_NOESC:
		switch (byte) {
		case 0xdb:
			bcsp->rx_esc_state = BCSP_ESCSTATE_ESC;
			break;
		default:
			memcpy(skb_put(bcsp->rx_skb, 1), &byte, 1);
			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
					bcsp->rx_state != BCSP_W4_CRC)
				bcsp_crc_update(&bcsp->message_crc, byte);
			bcsp->rx_count--;
		}
		break;

	case BCSP_ESCSTATE_ESC:
		switch (byte) {
		case 0xdc:
			memcpy(skb_put(bcsp->rx_skb, 1), &c0, 1);
			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
					bcsp->rx_state != BCSP_W4_CRC)
				bcsp_crc_update(&bcsp->message_crc, 0xc0);
			bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
			bcsp->rx_count--;
			break;

		case 0xdd:
			memcpy(skb_put(bcsp->rx_skb, 1), &db, 1);
			if ((bcsp->rx_skb->data[0] & 0x40) != 0 &&
					bcsp->rx_state != BCSP_W4_CRC)
				bcsp_crc_update(&bcsp->message_crc, 0xdb);
			bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
			bcsp->rx_count--;
			break;

		default:
			BT_ERR("Invalid byte %02x after esc byte", byte);
			kfree_skb(bcsp->rx_skb);
			bcsp->rx_skb = NULL;
			bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
			bcsp->rx_count = 0;
		}
	}
}

static inline void bcsp_complete_rx_pkt(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp = hu->priv;
	int pass_up;

	if (bcsp->rx_skb->data[0] & 0x80) {	/* reliable pkt */
		BT_DBG("Received seqno %u from card", bcsp->rxseq_txack);
		bcsp->rxseq_txack++;
		bcsp->rxseq_txack %= 0x8;
		bcsp->txack_req    = 1;

		/* If needed, transmit an ack pkt */
		hci_uart_tx_wakeup(hu);
	}

	bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07;
	BT_DBG("Request for pkt %u from card", bcsp->rxack);

	bcsp_pkt_cull(bcsp);
	if ((bcsp->rx_skb->data[1] & 0x0f) == 6 &&
			bcsp->rx_skb->data[0] & 0x80) {
		bt_cb(bcsp->rx_skb)->pkt_type = HCI_ACLDATA_PKT;
		pass_up = 1;
	} else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 &&
			bcsp->rx_skb->data[0] & 0x80) {
		bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;
		pass_up = 1;
	} else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) {
		bt_cb(bcsp->rx_skb)->pkt_type = HCI_SCODATA_PKT;
		pass_up = 1;
	} else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 &&
			!(bcsp->rx_skb->data[0] & 0x80)) {
		bcsp_handle_le_pkt(hu);
		pass_up = 0;
	} else
		pass_up = 0;

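	/*
	 * Frames that are not passed up to the HCI core: ack-only (0) and
	 * link-establishment (1) frames are consumed and freed here; with
	 * hciextn, frames for any other channel are wrapped into a
	 * vendor-specific HCI event (0xff) with a descriptor byte encoding
	 * the channel, otherwise they are dropped.
	 */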
	if (!pass_up) {
		struct hci_event_hdr hdr;
		u8 desc = (bcsp->rx_skb->data[1] & 0x0f);

		if (desc != 0 && desc != 1) {
			if (hciextn) {
				desc |= 0xc0;
				skb_pull(bcsp->rx_skb, 4);
				memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1);

				hdr.evt = 0xff;
				hdr.plen = bcsp->rx_skb->len;
				memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE);
				bt_cb(bcsp->rx_skb)->pkt_type = HCI_EVENT_PKT;

				hci_recv_frame(bcsp->rx_skb);
			} else {
				BT_ERR("Packet for unknown channel (%u %s)",
					bcsp->rx_skb->data[1] & 0x0f,
					bcsp->rx_skb->data[0] & 0x80 ?
					"reliable" : "unreliable");
				kfree_skb(bcsp->rx_skb);
			}
		} else
			kfree_skb(bcsp->rx_skb);
	} else {
		/* Pull out BCSP hdr */
		skb_pull(bcsp->rx_skb, 4);

		hci_recv_frame(bcsp->rx_skb);
	}
	bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
	bcsp->rx_skb = NULL;
}

/* Recv data */
static int bcsp_recv(struct hci_uart *hu, void *data, int count)
{
	struct bcsp_struct *bcsp = hu->priv;
	register unsigned char *ptr;

	BT_DBG("hu %p count %d rx_state %d rx_count %ld", 
		hu, count, bcsp->rx_state, bcsp->rx_count);

	ptr = data;
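	/*
	 * RX state machine: wait for a 0xc0 delimiter, then the start of a
	 * packet, then the 4-byte header, the payload and, if the header
	 * says so, a 16-bit CRC.
	 */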
	while (count) {
		if (bcsp->rx_count) {
			if (*ptr == 0xc0) {
				BT_ERR("Short BCSP packet");
				kfree_skb(bcsp->rx_skb);
				bcsp->rx_state = BCSP_W4_PKT_START;
				bcsp->rx_count = 0;
			} else
				bcsp_unslip_one_byte(bcsp, *ptr);

			ptr++; count--;
			continue;
		}

		switch (bcsp->rx_state) {
		case BCSP_W4_BCSP_HDR:
			if ((0xff & (u8) ~ (bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] +
					bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) {
				BT_ERR("Error in BCSP hdr checksum");
				kfree_skb(bcsp->rx_skb);
				bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
				bcsp->rx_count = 0;
				continue;
			}
			if (bcsp->rx_skb->data[0] & 0x80	/* reliable pkt */
					&& (bcsp->rx_skb->data[0] & 0x07) != bcsp->rxseq_txack) {
				BT_ERR("Out-of-order packet arrived, got %u expected %u",
					bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack);

				kfree_skb(bcsp->rx_skb);
				bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
				bcsp->rx_count = 0;
				continue;
			}
			bcsp->rx_state = BCSP_W4_DATA;
			bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + 
					(bcsp->rx_skb->data[2] << 4);	/* May be 0 */
			continue;

		case BCSP_W4_DATA:
			if (bcsp->rx_skb->data[0] & 0x40) {	/* pkt with crc */
				bcsp->rx_state = BCSP_W4_CRC;
				bcsp->rx_count = 2;
			} else
				bcsp_complete_rx_pkt(hu);
			continue;

		case BCSP_W4_CRC:
			if (bcsp_crc_reverse(bcsp->message_crc) !=
					(bcsp->rx_skb->data[bcsp->rx_skb->len - 2] << 8) +
					bcsp->rx_skb->data[bcsp->rx_skb->len - 1]) {

				BT_ERR("Checksum failed: computed %04x received %04x",
					bcsp_crc_reverse(bcsp->message_crc),
					(bcsp->rx_skb->data[bcsp->rx_skb->len - 2] << 8) +
					bcsp->rx_skb->data[bcsp->rx_skb->len - 1]);

				kfree_skb(bcsp->rx_skb);
				bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
				bcsp->rx_count = 0;
				continue;
			}
			skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2);
			bcsp_complete_rx_pkt(hu);
			continue;

		case BCSP_W4_PKT_DELIMITER:
			switch (*ptr) {
			case 0xc0:
				bcsp->rx_state = BCSP_W4_PKT_START;
				break;
			default:
				/*BT_ERR("Ignoring byte %02x", *ptr);*/
				break;
			}
			ptr++; count--;
			break;

		case BCSP_W4_PKT_START:
			switch (*ptr) {
			case 0xc0:
				ptr++; count--;
				break;

			default:
				bcsp->rx_state = BCSP_W4_BCSP_HDR;
				bcsp->rx_count = 4;
				bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC;
				BCSP_CRC_INIT(bcsp->message_crc);
				
				/* Do not increment ptr or decrement count
				 * Allocate packet. Max len of a BCSP pkt= 
				 * 0xFFF (payload) +4 (header) +2 (crc) */

				bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC);
				if (!bcsp->rx_skb) {
					BT_ERR("Can't allocate mem for new packet");
					bcsp->rx_state = BCSP_W4_PKT_DELIMITER;
					bcsp->rx_count = 0;
					return 0;
				}
				bcsp->rx_skb->dev = (void *) hu->hdev;
				break;
			}
			break;
		}
	}
	return count;
}

/* Retransmission timer expired: requeue everything still unacked onto
   the rel queue so that it gets sent again. */
static void bcsp_timed_event(unsigned long arg)
{
	struct hci_uart *hu = (struct hci_uart *) arg;
	struct bcsp_struct *bcsp = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen);

	spin_lock_irqsave(&bcsp->unack.lock, flags);

	while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) {
		bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07;
		skb_queue_head(&bcsp->rel, skb);
	}

	spin_unlock_irqrestore(&bcsp->unack.lock, flags);

	hci_uart_tx_wakeup(hu);
}

static int bcsp_open(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp;

	BT_DBG("hu %p", hu);

	bcsp = kzalloc(sizeof(*bcsp), GFP_ATOMIC);
	if (!bcsp)
		return -ENOMEM;

	hu->priv = bcsp;
	skb_queue_head_init(&bcsp->unack);
	skb_queue_head_init(&bcsp->rel);
	skb_queue_head_init(&bcsp->unrel);

	init_timer(&bcsp->tbcsp);
	bcsp->tbcsp.function = bcsp_timed_event;
	bcsp->tbcsp.data     = (u_long) hu;

	bcsp->rx_state = BCSP_W4_PKT_DELIMITER;

	if (txcrc)
		bcsp->use_crc = 1;

	return 0;
}

static int bcsp_close(struct hci_uart *hu)
{
	struct bcsp_struct *bcsp = hu->priv;
	hu->priv = NULL;

	BT_DBG("hu %p", hu);

	skb_queue_purge(&bcsp->unack);
	skb_queue_purge(&bcsp->rel);
	skb_queue_purge(&bcsp->unrel);
	del_timer(&bcsp->tbcsp);

	kfree(bcsp);
	return 0;
}

static struct hci_uart_proto bcsp = {
	.id      = HCI_UART_BCSP,
	.open    = bcsp_open,
	.close   = bcsp_close,
	.enqueue = bcsp_enqueue,
	.dequeue = bcsp_dequeue,
	.recv    = bcsp_recv,
	.flush   = bcsp_flush
};

int bcsp_init(void)
{
	int err = hci_uart_register_proto(&bcsp);
	if (!err)
		BT_INFO("HCI BCSP protocol initialized");
	else
		BT_ERR("HCI BCSP protocol registration failed");

	return err;
}

int bcsp_deinit(void)
{
	return hci_uart_unregister_proto(&bcsp);
}

module_param(txcrc, bool, 0644);
MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet");

module_param(hciextn, bool, 0644);
MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets");
ned long) filename; } retval = strncpy_from_user(page, filename, len); if (retval > 0) { if (retval < len) return 0; return -ENAMETOOLONG; } else if (!retval) retval = -ENOENT; return retval; } char * getname(const char __user * filename) { char *tmp, *result; result = ERR_PTR(-ENOMEM); tmp = __getname(); if (tmp) { int retval = do_getname(filename, tmp); result = tmp; if (retval < 0) { __putname(tmp); result = ERR_PTR(retval); } } audit_getname(result); return result; } #ifdef CONFIG_AUDITSYSCALL void putname(const char *name) { if (unlikely(current->audit_context)) audit_putname(name); else __putname(name); } EXPORT_SYMBOL(putname); #endif /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * @check_acl: optional callback to check for Posix ACLs * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things.. */ int generic_permission(struct inode *inode, int mask, int (*check_acl)(struct inode *inode, int mask)) { umode_t mode = inode->i_mode; if (current->fsuid == inode->i_uid) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG) && check_acl) { int error = check_acl(inode, mask); if (error == -EACCES) goto check_capabilities; else if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } /* * If the DACs are ok we don't need any capability check. */ if (((mode & mask & (MAY_READ|MAY_WRITE|MAY_EXEC)) == mask)) return 0; check_capabilities: /* * Read/write DACs are always overridable. * Executable DACs are overridable if at least one exec bit is set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)) if (capable(CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. */ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) if (capable(CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } int permission(struct inode *inode, int mask, struct nameidata *nd) { int retval, submask; if (mask & MAY_WRITE) { umode_t mode = inode->i_mode; /* * Nobody gets write access to a read-only fs. */ if (IS_RDONLY(inode) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; /* * Nobody gets write access to an immutable file. */ if (IS_IMMUTABLE(inode)) return -EACCES; } /* Ordinary permission routines do not understand MAY_APPEND. */ submask = mask & ~MAY_APPEND; if (inode->i_op && inode->i_op->permission) retval = inode->i_op->permission(inode, submask, nd); else retval = generic_permission(inode, submask, NULL); if (retval) return retval; return security_inode_permission(inode, mask, nd); } /** * vfs_permission - check for access rights to a given path * @nd: lookup result that describes the path * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on a path. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. 
*/ int vfs_permission(struct nameidata *nd, int mask) { return permission(nd->dentry->d_inode, mask, nd); } /** * file_permission - check for additional access rights to a given file * @file: file to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Used to check for read/write/execute permissions on an already opened * file. * * Note: * Do not use this function in new code. All access checks should * be done using vfs_permission(). */ int file_permission(struct file *file, int mask) { return permission(file->f_dentry->d_inode, mask, NULL); } /* * get_write_access() gets write permission for a file. * put_write_access() releases this write permission. * This is used for regular files. * We cannot support write (and maybe mmap read-write shared) accesses and * MAP_DENYWRITE mmappings simultaneously. The i_writecount field of an inode * can have the following values: * 0: no writers, no VM_DENYWRITE mappings * < 0: (-i_writecount) vm_area_structs with VM_DENYWRITE set exist * > 0: (i_writecount) users are writing to the file. * * Normally we operate on that counter with atomic_{inc,dec} and it's safe * except for the cases where we don't hold i_writecount yet. Then we need to * use {get,deny}_write_access() - these functions check the sign and refuse * to do the change if sign is wrong. Exclusion between them is provided by * the inode->i_lock spinlock. */ int get_write_access(struct inode * inode) { spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) < 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_inc(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } int deny_write_access(struct file * file) { struct inode *inode = file->f_dentry->d_inode; spin_lock(&inode->i_lock); if (atomic_read(&inode->i_writecount) > 0) { spin_unlock(&inode->i_lock); return -ETXTBSY; } atomic_dec(&inode->i_writecount); spin_unlock(&inode->i_lock); return 0; } void path_release(struct nameidata *nd) { dput(nd->dentry); mntput(nd->mnt); } /* * umount() mustn't call path_release()/mntput() as that would clear * mnt_expiry_mark */ void path_release_on_umount(struct nameidata *nd) { dput(nd->dentry); mntput_no_expire(nd->mnt); } /** * release_open_intent - free up open intent resources * @nd: pointer to nameidata */ void release_open_intent(struct nameidata *nd) { if (nd->intent.open.file->f_dentry == NULL) put_filp(nd->intent.open.file); else fput(nd->intent.open.file); } /* * Internal lookup() using the new generic dcache. * SMP-safe */ static struct dentry * cached_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd) { struct dentry * dentry = __d_lookup(parent, name); /* lockess __d_lookup may fail due to concurrent d_move() * in some unrelated directory, so try with d_lookup */ if (!dentry) dentry = d_lookup(parent, name); if (dentry && dentry->d_op && dentry->d_op->d_revalidate) { if (!dentry->d_op->d_revalidate(dentry, nd) && !d_invalidate(dentry)) { dput(dentry); dentry = NULL; } } return dentry; } /* * Short-cut version of permission(), for calling by * path_walk(), when dcache lock is held. Combines parts * of permission() and generic_permission(), and tests ONLY for * MAY_EXEC permission. * * If appropriate, check DAC only. If not appropriate, or * short-cut DAC fails, then call permission() to do more * complete permission check. 
*/ static int exec_permission_lite(struct inode *inode, struct nameidata *nd) { umode_t mode = inode->i_mode; if (inode->i_op && inode->i_op->permission) return -EAGAIN; if (current->fsuid == inode->i_uid) mode >>= 6; else if (in_group_p(inode->i_gid)) mode >>= 3; if (mode & MAY_EXEC) goto ok; if ((inode->i_mode & S_IXUGO) && capable(CAP_DAC_OVERRIDE)) goto ok; if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_OVERRIDE)) goto ok; if (S_ISDIR(inode->i_mode) && capable(CAP_DAC_READ_SEARCH)) goto ok; return -EACCES; ok: return security_inode_permission(inode, MAY_EXEC, nd); } /* * This is called when everything else fails, and we actually have * to go to the low-level filesystem to find out what we should do.. * * We get the directory semaphore, and after getting that we also * make sure that nobody added the entry to the dcache in the meantime.. * SMP-safe */ static struct dentry * real_lookup(struct dentry * parent, struct qstr * name, struct nameidata *nd) { struct dentry * result; struct inode *dir = parent->d_inode; mutex_lock(&dir->i_mutex); /* * First re-do the cached lookup just in case it was created * while we waited for the directory semaphore.. * * FIXME! This could use version numbering or similar to * avoid unnecessary cache lookups. * * The "dcache_lock" is purely to protect the RCU list walker * from concurrent renames at this point (we mustn't get false * negatives from the RCU list walk here, unlike the optimistic * fast walk). * * so doing d_lookup() (with seqlock), instead of lockfree __d_lookup */ result = d_lookup(parent, name); if (!result) { struct dentry * dentry = d_alloc(parent, name); result = ERR_PTR(-ENOMEM); if (dentry) { result = dir->i_op->lookup(dir, dentry, nd); if (result) dput(dentry); else result = dentry; } mutex_unlock(&dir->i_mutex); return result; } /* * Uhhuh! Nasty case: the cache was re-populated while * we waited on the semaphore. Need to revalidate. */ mutex_unlock(&dir->i_mutex); if (result->d_op && result->d_op->d_revalidate) { if (!result->d_op->d_revalidate(result, nd) && !d_invalidate(result)) { dput(result); result = ERR_PTR(-ENOENT); } } return result; } static int __emul_lookup_dentry(const char *, struct nameidata *); /* SMP-safe */ static __always_inline int walk_init_root(const char *name, struct nameidata *nd) { read_lock(&current->fs->lock); if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) { nd->mnt = mntget(current->fs->altrootmnt); nd->dentry = dget(current->fs->altroot); read_unlock(&current->fs->lock); if (__emul_lookup_dentry(name,nd)) return 0; read_lock(&current->fs->lock); } nd->mnt = mntget(current->fs->rootmnt); nd->dentry = dget(current->fs->root); read_unlock(&current->fs->lock); return 1; } static __always_inline int __vfs_follow_link(struct nameidata *nd, const char *link) { int res = 0; char *name; if (IS_ERR(link)) goto fail; if (*link == '/') { path_release(nd); if (!walk_init_root(link, nd)) /* weird __emul_prefix() stuff did it */ goto out; } res = link_path_walk(link, nd); out: if (nd->depth || res || nd->last_type!=LAST_NORM) return res; /* * If it is an iterative symlinks resolution in open_namei() we * have to copy the last component. And all that crap because of * bloody create() on broken symlinks. Furrfu... 
*/ name = __getname(); if (unlikely(!name)) { path_release(nd); return -ENOMEM; } strcpy(name, nd->last.name); nd->last.name = name; return 0; fail: path_release(nd); return PTR_ERR(link); } struct path { struct vfsmount *mnt; struct dentry *dentry; }; static __always_inline int __do_follow_link(struct path *path, struct nameidata *nd) { int error; void *cookie; struct dentry *dentry = path->dentry; touch_atime(path->mnt, dentry); nd_set_link(nd, NULL); if (path->mnt == nd->mnt) mntget(path->mnt); cookie = dentry->d_inode->i_op->follow_link(dentry, nd); error = PTR_ERR(cookie); if (!IS_ERR(cookie)) { char *s = nd_get_link(nd); error = 0; if (s) error = __vfs_follow_link(nd, s); if (dentry->d_inode->i_op->put_link) dentry->d_inode->i_op->put_link(dentry, nd, cookie); } dput(dentry); mntput(path->mnt); return error; } static inline void dput_path(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->mnt) mntput(path->mnt); } static inline void path_to_nameidata(struct path *path, struct nameidata *nd) { dput(nd->dentry); if (nd->mnt != path->mnt) mntput(nd->mnt); nd->mnt = path->mnt; nd->dentry = path->dentry; } /* * This limits recursive symlink follows to 8, while * limiting consecutive symlinks to 40. * * Without that kind of total limit, nasty chains of consecutive * symlinks can cause almost arbitrarily long lookups. */ static inline int do_follow_link(struct path *path, struct nameidata *nd) { int err = -ELOOP; if (current->link_count >= MAX_NESTED_LINKS) goto loop; if (current->total_link_count >= 40) goto loop; BUG_ON(nd->depth >= MAX_NESTED_LINKS); cond_resched(); err = security_inode_follow_link(path->dentry, nd); if (err) goto loop; current->link_count++; current->total_link_count++; nd->depth++; err = __do_follow_link(path, nd); current->link_count--; nd->depth--; return err; loop: dput_path(path, nd); path_release(nd); return err; } int follow_up(struct vfsmount **mnt, struct dentry **dentry) { struct vfsmount *parent; struct dentry *mountpoint; spin_lock(&vfsmount_lock); parent=(*mnt)->mnt_parent; if (parent == *mnt) { spin_unlock(&vfsmount_lock); return 0; } mntget(parent); mountpoint=dget((*mnt)->mnt_mountpoint); spin_unlock(&vfsmount_lock); dput(*dentry); *dentry = mountpoint; mntput(*mnt); *mnt = parent; return 1; } /* no need for dcache_lock, as serialization is taken care in * namespace.c */ static int __follow_mount(struct path *path) { int res = 0; while (d_mountpoint(path->dentry)) { struct vfsmount *mounted = lookup_mnt(path->mnt, path->dentry); if (!mounted) break; dput(path->dentry); if (res) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); res = 1; } return res; } static void follow_mount(struct vfsmount **mnt, struct dentry **dentry) { while (d_mountpoint(*dentry)) { struct vfsmount *mounted = lookup_mnt(*mnt, *dentry); if (!mounted) break; dput(*dentry); mntput(*mnt); *mnt = mounted; *dentry = dget(mounted->mnt_root); } } /* no need for dcache_lock, as serialization is taken care in * namespace.c */ int follow_down(struct vfsmount **mnt, struct dentry **dentry) { struct vfsmount *mounted; mounted = lookup_mnt(*mnt, *dentry); if (mounted) { dput(*dentry); mntput(*mnt); *mnt = mounted; *dentry = dget(mounted->mnt_root); return 1; } return 0; } static __always_inline void follow_dotdot(struct nameidata *nd) { while(1) { struct vfsmount *parent; struct dentry *old = nd->dentry; read_lock(&current->fs->lock); if (nd->dentry == current->fs->root && nd->mnt == current->fs->rootmnt) { 
read_unlock(&current->fs->lock); break; } read_unlock(&current->fs->lock); spin_lock(&dcache_lock); if (nd->dentry != nd->mnt->mnt_root) { nd->dentry = dget(nd->dentry->d_parent); spin_unlock(&dcache_lock); dput(old); break; } spin_unlock(&dcache_lock); spin_lock(&vfsmount_lock); parent = nd->mnt->mnt_parent; if (parent == nd->mnt) { spin_unlock(&vfsmount_lock); break; } mntget(parent); nd->dentry = dget(nd->mnt->mnt_mountpoint); spin_unlock(&vfsmount_lock); dput(old); mntput(nd->mnt); nd->mnt = parent; } follow_mount(&nd->mnt, &nd->dentry); } /* * It's more convoluted than I'd like it to be, but... it's still fairly * small and for now I'd prefer to have fast path as straight as possible. * It _is_ time-critical. */ static int do_lookup(struct nameidata *nd, struct qstr *name, struct path *path) { struct vfsmount *mnt = nd->mnt; struct dentry *dentry = __d_lookup(nd->dentry, name); if (!dentry) goto need_lookup; if (dentry->d_op && dentry->d_op->d_revalidate) goto need_revalidate; done: path->mnt = mnt; path->dentry = dentry; __follow_mount(path); return 0; need_lookup: dentry = real_lookup(nd->dentry, name, nd); if (IS_ERR(dentry)) goto fail; goto done; need_revalidate: if (dentry->d_op->d_revalidate(dentry, nd)) goto done; if (d_invalidate(dentry)) goto done; dput(dentry); goto need_lookup; fail: return PTR_ERR(dentry); } /* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. * * Returns 0 and nd will have valid dentry and mnt on success. * Returns error and drops reference to input namei data on failure. */ static fastcall int __link_path_walk(const char * name, struct nameidata *nd) { struct path next; struct inode *inode; int err; unsigned int lookup_flags = nd->flags; while (*name=='/') name++; if (!*name) goto return_reval; inode = nd->dentry->d_inode; if (nd->depth) lookup_flags = LOOKUP_FOLLOW; /* At this point we know we have a real path component. */ for(;;) { unsigned long hash; struct qstr this; unsigned int c; nd->flags |= LOOKUP_CONTINUE; err = exec_permission_lite(inode, nd); if (err == -EAGAIN) err = vfs_permission(nd, MAY_EXEC); if (err) break; this.name = name; c = *(const unsigned char *)name; hash = init_name_hash(); do { name++; hash = partial_name_hash(c, hash); c = *(const unsigned char *)name; } while (c && (c != '/')); this.len = name - (const char *) this.name; this.hash = end_name_hash(hash); /* remove trailing slashes? */ if (!c) goto last_component; while (*++name == '/'); if (!*name) goto last_with_slashes; /* * "." and ".." are special - ".." especially so because it has * to be able to know about the current root directory and * parent relationships. */ if (this.name[0] == '.') switch (this.len) { default: break; case 2: if (this.name[1] != '.') break; follow_dotdot(nd); inode = nd->dentry->d_inode; /* fallthrough */ case 1: continue; } /* * See if the low-level filesystem might want * to use its own hash.. */ if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { err = nd->dentry->d_op->d_hash(nd->dentry, &this); if (err < 0) break; } /* This does the actual lookups.. 
*/ err = do_lookup(nd, &this, &next); if (err) break; err = -ENOENT; inode = next.dentry->d_inode; if (!inode) goto out_dput; err = -ENOTDIR; if (!inode->i_op) goto out_dput; if (inode->i_op->follow_link) { err = do_follow_link(&next, nd); if (err) goto return_err; err = -ENOENT; inode = nd->dentry->d_inode; if (!inode) break; err = -ENOTDIR; if (!inode->i_op) break; } else path_to_nameidata(&next, nd); err = -ENOTDIR; if (!inode->i_op->lookup) break; continue; /* here ends the main loop */ last_with_slashes: lookup_flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; last_component: nd->flags &= ~LOOKUP_CONTINUE; if (lookup_flags & LOOKUP_PARENT) goto lookup_parent; if (this.name[0] == '.') switch (this.len) { default: break; case 2: if (this.name[1] != '.') break; follow_dotdot(nd); inode = nd->dentry->d_inode; /* fallthrough */ case 1: goto return_reval; } if (nd->dentry->d_op && nd->dentry->d_op->d_hash) { err = nd->dentry->d_op->d_hash(nd->dentry, &this); if (err < 0) break; } err = do_lookup(nd, &this, &next); if (err) break; inode = next.dentry->d_inode; if ((lookup_flags & LOOKUP_FOLLOW) && inode && inode->i_op && inode->i_op->follow_link) { err = do_follow_link(&next, nd); if (err) goto return_err; inode = nd->dentry->d_inode; } else path_to_nameidata(&next, nd); err = -ENOENT; if (!inode) break; if (lookup_flags & LOOKUP_DIRECTORY) { err = -ENOTDIR; if (!inode->i_op || !inode->i_op->lookup) break; } goto return_base; lookup_parent: nd->last = this; nd->last_type = LAST_NORM; if (this.name[0] != '.') goto return_base; if (this.len == 1) nd->last_type = LAST_DOT; else if (this.len == 2 && this.name[1] == '.') nd->last_type = LAST_DOTDOT; else goto return_base; return_reval: /* * We bypassed the ordinary revalidation routines. * We may need to check the cached dentry for staleness. */ if (nd->dentry && nd->dentry->d_sb && (nd->dentry->d_sb->s_type->fs_flags & FS_REVAL_DOT)) { err = -ESTALE; /* Note: we do not d_invalidate() */ if (!nd->dentry->d_op->d_revalidate(nd->dentry, nd)) break; } return_base: return 0; out_dput: dput_path(&next, nd); break; } path_release(nd); return_err: return err; } /* * Wrapper to retry pathname resolution whenever the underlying * file system returns an ESTALE. * * Retry the whole path once, forcing real lookup requests * instead of relying on the dcache. */ int fastcall link_path_walk(const char *name, struct nameidata *nd) { struct nameidata save = *nd; int result; /* make sure the stuff we saved doesn't go away */ dget(save.dentry); mntget(save.mnt); result = __link_path_walk(name, nd); if (result == -ESTALE) { *nd = save; dget(nd->dentry); mntget(nd->mnt); nd->flags |= LOOKUP_REVAL; result = __link_path_walk(name, nd); } dput(save.dentry); mntput(save.mnt); return result; } int fastcall path_walk(const char * name, struct nameidata *nd) { current->total_link_count = 0; return link_path_walk(name, nd); } /* * SMP-safe: Returns 1 and nd will have valid dentry and mnt, if * everything is done. Returns 0 and drops input nd, if lookup failed; */ static int __emul_lookup_dentry(const char *name, struct nameidata *nd) { if (path_walk(name, nd)) return 0; /* something went wrong... */ if (!nd->dentry->d_inode || S_ISDIR(nd->dentry->d_inode->i_mode)) { struct dentry *old_dentry = nd->dentry; struct vfsmount *old_mnt = nd->mnt; struct qstr last = nd->last; int last_type = nd->last_type; /* * NAME was not found in alternate root or it's a directory. 
Try to find * it in the normal root: */ nd->last_type = LAST_ROOT; read_lock(&current->fs->lock); nd->mnt = mntget(current->fs->rootmnt); nd->dentry = dget(current->fs->root); read_unlock(&current->fs->lock); if (path_walk(name, nd) == 0) { if (nd->dentry->d_inode) { dput(old_dentry); mntput(old_mnt); return 1; } path_release(nd); } nd->dentry = old_dentry; nd->mnt = old_mnt; nd->last = last; nd->last_type = last_type; } return 1; } void set_fs_altroot(void) { char *emul = __emul_prefix(); struct nameidata nd; struct vfsmount *mnt = NULL, *oldmnt; struct dentry *dentry = NULL, *olddentry; int err; if (!emul) goto set_it; err = path_lookup(emul, LOOKUP_FOLLOW|LOOKUP_DIRECTORY|LOOKUP_NOALT, &nd); if (!err) { mnt = nd.mnt; dentry = nd.dentry; } set_it: write_lock(&current->fs->lock); oldmnt = current->fs->altrootmnt; olddentry = current->fs->altroot; current->fs->altrootmnt = mnt; current->fs->altroot = dentry; write_unlock(&current->fs->lock); if (olddentry) { dput(olddentry); mntput(oldmnt); } } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. */ static int fastcall do_path_lookup(int dfd, const char *name, unsigned int flags, struct nameidata *nd) { int retval = 0; nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags; nd->depth = 0; read_lock(&current->fs->lock); if (*name=='/') { if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) { nd->mnt = mntget(current->fs->altrootmnt); nd->dentry = dget(current->fs->altroot); read_unlock(&current->fs->lock); if (__emul_lookup_dentry(name,nd)) goto out; /* found in altroot */ read_lock(&current->fs->lock); } nd->mnt = mntget(current->fs->rootmnt); nd->dentry = dget(current->fs->root); } else if (dfd == AT_FDCWD) { nd->mnt = mntget(current->fs->pwdmnt); nd->dentry = dget(current->fs->pwd); } else { struct file *file; int fput_needed; struct dentry *dentry; file = fget_light(dfd, &fput_needed); if (!file) { retval = -EBADF; goto out_fail; } dentry = file->f_dentry; if (!S_ISDIR(dentry->d_inode->i_mode)) { retval = -ENOTDIR; fput_light(file, fput_needed); goto out_fail; } retval = file_permission(file, MAY_EXEC); if (retval) { fput_light(file, fput_needed); goto out_fail; } nd->mnt = mntget(file->f_vfsmnt); nd->dentry = dget(dentry); fput_light(file, fput_needed); } read_unlock(&current->fs->lock); current->total_link_count = 0; retval = link_path_walk(name, nd); out: if (unlikely(current->audit_context && nd && nd->dentry && nd->dentry->d_inode)) audit_inode(name, nd->dentry->d_inode, flags); out_fail: return retval; } int fastcall path_lookup(const char *name, unsigned int flags, struct nameidata *nd) { return do_path_lookup(AT_FDCWD, name, flags, nd); } static int __path_lookup_intent_open(int dfd, const char *name, unsigned int lookup_flags, struct nameidata *nd, int open_flags, int create_mode) { struct file *filp = get_empty_filp(); int err; if (filp == NULL) return -ENFILE; nd->intent.open.file = filp; nd->intent.open.flags = open_flags; nd->intent.open.create_mode = create_mode; err = do_path_lookup(dfd, name, lookup_flags|LOOKUP_OPEN, nd); if (IS_ERR(nd->intent.open.file)) { if (err == 0) { err = PTR_ERR(nd->intent.open.file); path_release(nd); } } else if (err != 0) release_open_intent(nd); return err; } /** * path_lookup_open - lookup a file path with open intent * @dfd: the directory to use as base, or AT_FDCWD * @name: pointer to file name * @lookup_flags: lookup intent flags * @nd: pointer to nameidata * @open_flags: open intent flags */ int path_lookup_open(int dfd, const char *name, 
unsigned int lookup_flags, struct nameidata *nd, int open_flags) { return __path_lookup_intent_open(dfd, name, lookup_flags, nd, open_flags, 0); } /** * path_lookup_create - lookup a file path with open + create intent * @dfd: the directory to use as base, or AT_FDCWD * @name: pointer to file name * @lookup_flags: lookup intent flags * @nd: pointer to nameidata * @open_flags: open intent flags * @create_mode: create intent flags */ static int path_lookup_create(int dfd, const char *name, unsigned int lookup_flags, struct nameidata *nd, int open_flags, int create_mode) { return __path_lookup_intent_open(dfd, name, lookup_flags|LOOKUP_CREATE, nd, open_flags, create_mode); } int __user_path_lookup_open(const char __user *name, unsigned int lookup_flags, struct nameidata *nd, int open_flags) { char *tmp = getname(name); int err = PTR_ERR(tmp); if (!IS_ERR(tmp)) { err = __path_lookup_intent_open(AT_FDCWD, tmp, lookup_flags, nd, open_flags, 0); putname(tmp); } return err; } /* * Restricted form of lookup. Doesn't follow links, single-component only, * needs parent already locked. Doesn't follow mounts. * SMP-safe. */ static struct dentry * __lookup_hash(struct qstr *name, struct dentry * base, struct nameidata *nd) { struct dentry * dentry; struct inode *inode; int err; inode = base->d_inode; err = permission(inode, MAY_EXEC, nd); dentry = ERR_PTR(err); if (err) goto out; /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_op && base->d_op->d_hash) { err = base->d_op->d_hash(base, name); dentry = ERR_PTR(err); if (err < 0) goto out; } dentry = cached_lookup(base, name, nd); if (!dentry) { struct dentry *new = d_alloc(base, name); dentry = ERR_PTR(-ENOMEM); if (!new) goto out; dentry = inode->i_op->lookup(inode, new, nd); if (!dentry) dentry = new; else dput(new); } out: return dentry; } struct dentry * lookup_hash(struct nameidata *nd) { return __lookup_hash(&nd->last, nd->dentry, nd); } /* SMP-safe */ struct dentry * lookup_one_len(const char * name, struct dentry * base, int len) { unsigned long hash; struct qstr this; unsigned int c; this.name = name; this.len = len; if (!len) goto access; hash = init_name_hash(); while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') goto access; hash = partial_name_hash(c, hash); } this.hash = end_name_hash(hash); return __lookup_hash(&this, base, NULL); access: return ERR_PTR(-EACCES); } /* * namei() * * is used by most simple commands to get the inode of a specified name. * Open, link etc use their own routines, but this is enough for things * like 'chmod' etc. * * namei exists in two versions: namei/lnamei. The only difference is * that namei follows links, while lnamei does not. * SMP-safe */ int fastcall __user_walk_fd(int dfd, const char __user *name, unsigned flags, struct nameidata *nd) { char *tmp = getname(name); int err = PTR_ERR(tmp); if (!IS_ERR(tmp)) { err = do_path_lookup(dfd, tmp, flags, nd); putname(tmp); } return err; } int fastcall __user_walk(const char __user *name, unsigned flags, struct nameidata *nd) { return __user_walk_fd(AT_FDCWD, name, flags, nd); } /* * It's inline, so penalty for filesystems that don't use sticky bit is * minimal. 
*/ static inline int check_sticky(struct inode *dir, struct inode *inode) { if (!(dir->i_mode & S_ISVTX)) return 0; if (inode->i_uid == current->fsuid) return 0; if (dir->i_uid == current->fsuid) return 0; return !capable(CAP_FOWNER); } /* * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. * 1. We can't do it if dir is read-only (done in permission()) * 2. We should have write and exec permissions on dir * 3. We can't remove anything from append-only dir * 4. We can't do anything with immutable dir (done in permission()) * 5. If the sticky bit on dir is set we should either * a. be owner of dir, or * b. be owner of victim, or * c. have CAP_FOWNER capability * 6. If the victim is append-only or immutable we can't do antyhing with * links pointing to it. * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. * 9. We can't remove a root or mountpoint. * 10. We don't allow removal of NFS sillyrenamed files; it's handled by * nfs_async_unlink(). */ static int may_delete(struct inode *dir,struct dentry *victim,int isdir) { int error; if (!victim->d_inode) return -ENOENT; BUG_ON(victim->d_parent->d_inode != dir); error = permission(dir,MAY_WRITE | MAY_EXEC, NULL); if (error) return error; if (IS_APPEND(dir)) return -EPERM; if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)|| IS_IMMUTABLE(victim->d_inode)) return -EPERM; if (isdir) { if (!S_ISDIR(victim->d_inode->i_mode)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; } else if (S_ISDIR(victim->d_inode->i_mode)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; if (victim->d_flags & DCACHE_NFSFS_RENAMED) return -EBUSY; return 0; } /* Check whether we can create an object with dentry child in directory * dir. * 1. We can't do it if child already exists (open has special treatment for * this case, but since we are inlined it's OK) * 2. We can't do it if dir is read-only (done in permission()) * 3. We should have write and exec permissions on dir * 4. We can't do it if dir is immutable (done in permission()) */ static inline int may_create(struct inode *dir, struct dentry *child, struct nameidata *nd) { if (child->d_inode) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; return permission(dir,MAY_WRITE | MAY_EXEC, nd); } /* * O_DIRECTORY translates into forcing a directory lookup. */ static inline int lookup_flags(unsigned int f) { unsigned long retval = LOOKUP_FOLLOW; if (f & O_NOFOLLOW) retval &= ~LOOKUP_FOLLOW; if (f & O_DIRECTORY) retval |= LOOKUP_DIRECTORY; return retval; } /* * p1 and p2 should be directories on the same fs. 
*/ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) { struct dentry *p; if (p1 == p2) { mutex_lock(&p1->d_inode->i_mutex); return NULL; } down(&p1->d_inode->i_sb->s_vfs_rename_sem); for (p = p1; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p2) { mutex_lock(&p2->d_inode->i_mutex); mutex_lock(&p1->d_inode->i_mutex); return p; } } for (p = p2; p->d_parent != p; p = p->d_parent) { if (p->d_parent == p1) { mutex_lock(&p1->d_inode->i_mutex); mutex_lock(&p2->d_inode->i_mutex); return p; } } mutex_lock(&p1->d_inode->i_mutex); mutex_lock(&p2->d_inode->i_mutex); return NULL; } void unlock_rename(struct dentry *p1, struct dentry *p2) { mutex_unlock(&p1->d_inode->i_mutex); if (p1 != p2) { mutex_unlock(&p2->d_inode->i_mutex); up(&p1->d_inode->i_sb->s_vfs_rename_sem); } } int vfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { int error = may_create(dir, dentry, nd); if (error) return error; if (!dir->i_op || !dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; error = security_inode_create(dir, dentry, mode); if (error) return error; DQUOT_INIT(dir); error = dir->i_op->create(dir, dentry, mode, nd); if (!error) fsnotify_create(dir, dentry->d_name.name); return error; } int may_open(struct nameidata *nd, int acc_mode, int flag) { struct dentry *dentry = nd->dentry; struct inode *inode = dentry->d_inode; int error; if (!inode) return -ENOENT; if (S_ISLNK(inode->i_mode)) return -ELOOP; if (S_ISDIR(inode->i_mode) && (flag & FMODE_WRITE)) return -EISDIR; error = vfs_permission(nd, acc_mode); if (error) return error; /* * FIFO's, sockets and device files are special: they don't * actually live on the filesystem itself, and as such you * can write to them even if the filesystem is read-only. */ if (S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) { flag &= ~O_TRUNC; } else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) { if (nd->mnt->mnt_flags & MNT_NODEV) return -EACCES; flag &= ~O_TRUNC; } else if (IS_RDONLY(inode) && (flag & FMODE_WRITE)) return -EROFS; /* * An append-only file must be opened in append mode for writing. */ if (IS_APPEND(inode)) { if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) return -EPERM; if (flag & O_TRUNC) return -EPERM; } /* O_NOATIME can only be set by the owner or superuser */ if (flag & O_NOATIME) if (current->fsuid != inode->i_uid && !capable(CAP_FOWNER)) return -EPERM; /* * Ensure there are no outstanding leases on the file. */ error = break_lease(inode, flag); if (error) return error; if (flag & O_TRUNC) { error = get_write_access(inode); if (error) return error; /* * Refuse to truncate files with mandatory locks held on them. */ error = locks_verify_locked(inode); if (!error) { DQUOT_INIT(inode); error = do_truncate(dentry, 0, ATTR_MTIME|ATTR_CTIME, NULL); } put_write_access(inode); if (error) return error; } else if (flag & FMODE_WRITE) DQUOT_INIT(inode); return 0; } /* * open_namei() * * namei for open - this is in fact almost the whole open-routine. * * Note that the low bits of "flag" aren't the same as in the open * system call - they are 00 - no permissions needed * 01 - read permission needed * 10 - write permission needed * 11 - read/write permissions needed * which is a lot more logical, and also allows the "no perm" needed * for symlinks (where the permissions are checked later). 
* SMP-safe */ int open_namei(int dfd, const char *pathname, int flag, int mode, struct nameidata *nd) { int acc_mode, error; struct path path; struct dentry *dir; int count = 0; acc_mode = ACC_MODE(flag); /* O_TRUNC implies we need access checks for write permissions */ if (flag & O_TRUNC) acc_mode |= MAY_WRITE; /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flag & O_APPEND) acc_mode |= MAY_APPEND; /* * The simplest case - just a plain lookup. */ if (!(flag & O_CREAT)) { error = path_lookup_open(dfd, pathname, lookup_flags(flag), nd, flag); if (error) return error; goto ok; } /* * Create - we need to know the parent. */ error = path_lookup_create(dfd,pathname,LOOKUP_PARENT,nd,flag,mode); if (error) return error; /* * We have the parent and last component. First of all, check * that we are not asked to creat(2) an obvious directory - that * will not do. */ error = -EISDIR; if (nd->last_type != LAST_NORM || nd->last.name[nd->last.len]) goto exit; dir = nd->dentry; nd->flags &= ~LOOKUP_PARENT; mutex_lock(&dir->d_inode->i_mutex); path.dentry = lookup_hash(nd); path.mnt = nd->mnt; do_last: error = PTR_ERR(path.dentry); if (IS_ERR(path.dentry)) { mutex_unlock(&dir->d_inode->i_mutex); goto exit; } /* Negative dentry, just create the file */ if (!path.dentry->d_inode) { if (!IS_POSIXACL(dir->d_inode)) mode &= ~current->fs->umask; error = vfs_create(dir->d_inode, path.dentry, mode, nd); mutex_unlock(&dir->d_inode->i_mutex); dput(nd->dentry); nd->dentry = path.dentry; if (error) goto exit; /* Don't check for write permission, don't truncate */ acc_mode = 0; flag &= ~O_TRUNC; goto ok; } /* * It already exists. */ mutex_unlock(&dir->d_inode->i_mutex); error = -EEXIST; if (flag & O_EXCL) goto exit_dput; if (__follow_mount(&path)) { error = -ELOOP; if (flag & O_NOFOLLOW) goto exit_dput; } error = -ENOENT; if (!path.dentry->d_inode) goto exit_dput; if (path.dentry->d_inode->i_op && path.dentry->d_inode->i_op->follow_link) goto do_link; path_to_nameidata(&path, nd); error = -EISDIR; if (path.dentry->d_inode && S_ISDIR(path.dentry->d_inode->i_mode)) goto exit; ok: error = may_open(nd, acc_mode, flag); if (error) goto exit; return 0; exit_dput: dput_path(&path, nd); exit: if (!IS_ERR(nd->intent.open.file)) release_open_intent(nd); path_release(nd); return error; do_link: error = -ELOOP; if (flag & O_NOFOLLOW) goto exit_dput; /* * This is subtle. Instead of calling do_follow_link() we do the * thing by hands. The reason is that this way we have zero link_count * and path_walk() (called from ->follow_link) honoring LOOKUP_PARENT. * After that we have the parent and last component, i.e. * we are in the same situation as after the first path_walk(). * Well, almost - if the last component is normal we get its copy * stored in nd->last.name and we will have to putname() it when we * are done. Procfs-like symlinks just set LAST_BIND. 
*/ nd->flags |= LOOKUP_PARENT; error = security_inode_follow_link(path.dentry, nd); if (error) goto exit_dput; error = __do_follow_link(&path, nd); if (error) return error; nd->flags &= ~LOOKUP_PARENT; if (nd->last_type == LAST_BIND) goto ok; error = -EISDIR; if (nd->last_type != LAST_NORM) goto exit; if (nd->last.name[nd->last.len]) { __putname(nd->last.name); goto exit; } error = -ELOOP; if (count++==32) { __putname(nd->last.name); goto exit; } dir = nd->dentry; mutex_lock(&dir->d_inode->i_mutex); path.dentry = lookup_hash(nd); path.mnt = nd->mnt; __putname(nd->last.name); goto do_last; } /** * lookup_create - lookup a dentry, creating it if it doesn't exist * @nd: nameidata info * @is_dir: directory flag * * Simple function to lookup and return a dentry and create it * if it doesn't exist. Is SMP-safe. * * Returns with nd->dentry->d_inode->i_mutex locked. */ struct dentry *lookup_create(struct nameidata *nd, int is_dir) { struct dentry *dentry = ERR_PTR(-EEXIST); mutex_lock(&nd->dentry->d_inode->i_mutex); /* * Yucky last component or no last component at all? * (foo/., foo/.., /////) */ if (nd->last_type != LAST_NORM) goto fail; nd->flags &= ~LOOKUP_PARENT; /* * Do the final lookup. */ dentry = lookup_hash(nd); if (IS_ERR(dentry)) goto fail; /* * Special case - lookup gave negative, but... we had foo/bar/ * From the vfs_mknod() POV we just have a negative dentry - * all is fine. Let's be bastards - you had / on the end, you've * been asking for (non-existent) directory. -ENOENT for you. */ if (!is_dir && nd->last.name[nd->last.len] && !dentry->d_inode) goto enoent; return dentry; enoent: dput(dentry); dentry = ERR_PTR(-ENOENT); fail: return dentry; } EXPORT_SYMBOL_GPL(lookup_create); int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) { int error = may_create(dir, dentry, NULL); if (error) return error; if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) return -EPERM; if (!dir->i_op || !dir->i_op->mknod) return -EPERM; error = security_inode_mknod(dir, dentry, mode, dev); if (error) return error; DQUOT_INIT(dir); error = dir->i_op->mknod(dir, dentry, mode, dev); if (!error) fsnotify_create(dir, dentry->d_name.name); return error; } asmlinkage long sys_mknodat(int dfd, const char __user *filename, int mode, unsigned dev) { int error = 0; char * tmp; struct dentry * dentry; struct nameidata nd; if (S_ISDIR(mode)) return -EPERM; tmp = getname(filename); if (IS_ERR(tmp)) return PTR_ERR(tmp); error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd); if (error) goto out; dentry = lookup_create(&nd, 0); error = PTR_ERR(dentry); if (!IS_POSIXACL(nd.dentry->d_inode)) mode &= ~current->fs->umask; if (!IS_ERR(dentry)) { switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd); break; case S_IFCHR: case S_IFBLK: error = vfs_mknod(nd.dentry->d_inode,dentry,mode, new_decode_dev(dev)); break; case S_IFIFO: case S_IFSOCK: error = vfs_mknod(nd.dentry->d_inode,dentry,mode,0); break; case S_IFDIR: error = -EPERM; break; default: error = -EINVAL; } dput(dentry); } mutex_unlock(&nd.dentry->d_inode->i_mutex); path_release(&nd); out: putname(tmp); return error; } asmlinkage long sys_mknod(const char __user *filename, int mode, unsigned dev) { return sys_mknodat(AT_FDCWD, filename, mode, dev); } int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { int error = may_create(dir, dentry, NULL); if (error) return error; if (!dir->i_op || !dir->i_op->mkdir) return -EPERM; mode &= (S_IRWXUGO|S_ISVTX); error = 
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	DQUOT_INIT(dir);
	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry->d_name.name);
	return error;
}

asmlinkage long sys_mkdirat(int dfd, const char __user *pathname, int mode)
{
	int error = 0;
	char * tmp;

	tmp = getname(pathname);
	error = PTR_ERR(tmp);
	if (!IS_ERR(tmp)) {
		struct dentry *dentry;
		struct nameidata nd;

		error = do_path_lookup(dfd, tmp, LOOKUP_PARENT, &nd);
		if (error)
			goto out;
		dentry = lookup_create(&nd, 1);
		error = PTR_ERR(dentry);
		if (!IS_ERR(dentry)) {
			if (!IS_POSIXACL(nd.dentry->d_inode))
				mode &= ~current->fs->umask;
			error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
			dput(dentry);
		}
		mutex_unlock(&nd.dentry->d_inode->i_mutex);
		path_release(&nd);
out:
		putname(tmp);
	}

	return error;
}

asmlinkage long sys_mkdir(const char __user *pathname, int mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/*
 * We try to drop the dentry early: we should have
 * a usage count of 2 if we're the only user of this
 * dentry, and if that is true (possibly after pruning
 * the dcache), then we drop the dentry now.
 *
 * A low-level filesystem can, if it chooses, legally
 * do a
 *
 *	if (!d_unhashed(dentry))
 *		return -EBUSY;
 *
 * if it cannot handle the case of removing a directory
 * that is still in use by something else..
 */
void dentry_unhash(struct dentry *dentry)
{
	dget(dentry);
	if (atomic_read(&dentry->d_count))
		shrink_dcache_parent(dentry);
	spin_lock(&dcache_lock);
	spin_lock(&dentry->d_lock);
	if (atomic_read(&dentry->d_count) == 2)
		__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_lock);
}

int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op || !dir->i_op->rmdir)
		return -EPERM;

	DQUOT_INIT(dir);

	mutex_lock(&dentry->d_inode->i_mutex);
	dentry_unhash(dentry);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_rmdir(dir, dentry);
		if (!error) {
			error = dir->i_op->rmdir(dir, dentry);
			if (!error)
				dentry->d_inode->i_flags |= S_DEAD;
		}
	}
	mutex_unlock(&dentry->d_inode->i_mutex);
	if (!error) {
		d_delete(dentry);
	}
	dput(dentry);

	return error;
}

static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	char * name;
	struct dentry *dentry;
	struct nameidata nd;

	name = getname(pathname);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
	if (error)
		goto exit;

	switch (nd.last_type) {
		case LAST_DOTDOT:
			error = -ENOTEMPTY;
			goto exit1;
		case LAST_DOT:
			error = -EINVAL;
			goto exit1;
		case LAST_ROOT:
			error = -EBUSY;
			goto exit1;
	}
	mutex_lock(&nd.dentry->d_inode->i_mutex);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		error = vfs_rmdir(nd.dentry->d_inode, dentry);
		dput(dentry);
	}
	mutex_unlock(&nd.dentry->d_inode->i_mutex);
exit1:
	path_release(&nd);
exit:
	putname(name);
	return error;
}

asmlinkage long sys_rmdir(const char __user *pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

int vfs_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op || !dir->i_op->unlink)
		return -EPERM;

	DQUOT_INIT(dir);

	mutex_lock(&dentry->d_inode->i_mutex);
	if (d_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error)
			error = dir->i_op->unlink(dir, dentry);
	}
	mutex_unlock(&dentry->d_inode->i_mutex);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		d_delete(dentry);
	}

	return error;
}

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error = 0;
	char * name;
	struct dentry *dentry;
	struct nameidata nd;
	struct inode *inode = NULL;

	name = getname(pathname);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = do_path_lookup(dfd, name, LOOKUP_PARENT, &nd);
	if (error)
		goto exit;
	error = -EISDIR;
	if (nd.last_type != LAST_NORM)
		goto exit1;
	mutex_lock(&nd.dentry->d_inode->i_mutex);
	dentry = lookup_hash(&nd);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (nd.last.name[nd.last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (inode)
			atomic_inc(&inode->i_count);
		error = vfs_unlink(nd.dentry->d_inode, dentry);
	exit2:
		dput(dentry);
	}
	mutex_unlock(&nd.dentry->d_inode->i_mutex);
	if (inode)
		iput(inode);	/* truncate the inode here */
exit1:
	path_release(&nd);
exit:
	putname(name);
	return error;

slashes:
	error = !dentry->d_inode ? -ENOENT :
		S_ISDIR(dentry->d_inode->i_mode) ? -EISDIR : -ENOTDIR;
	goto exit2;
}

asmlinkage long sys_unlinkat(int dfd, const char __user *pathname, int flag)
{
	if ((flag & ~AT_REMOVEDIR) != 0)
		return -EINVAL;

	if (flag & AT_REMOVEDIR)
		return do_rmdir(dfd, pathname);

	return do_unlinkat(dfd, pathname);
}

asmlinkage long sys_unlink(const char __user *pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}

int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode)
{
	int error = may_create(dir, dentry, NULL);

	if (error)
		return error;

	if (!dir->i_op || !dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	DQUOT_INIT(dir);
	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry->d_name.name);
	return error;
}

asmlinkage long sys_symlinkat(const char __user *oldname,
			      int newdfd, const char __user *newname)
{
	int error = 0;
	char * from;
	char * to;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
	to = getname(newname);
	error = PTR_ERR(to);
	if (!IS_ERR(to)) {
		struct dentry *dentry;
		struct nameidata nd;

		error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
		if (error)
			goto out;
		dentry = lookup_create(&nd, 0);
		error = PTR_ERR(dentry);
		if (!IS_ERR(dentry)) {
			error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
			dput(dentry);
		}
		mutex_unlock(&nd.dentry->d_inode->i_mutex);
		path_release(&nd);
out:
		putname(to);
	}
	putname(from);
	return error;
}

asmlinkage long sys_symlink(const char __user *oldname, const char __user *newname)
{
	return sys_symlinkat(oldname, AT_FDCWD, newname);
}

int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	error = may_create(dir, new_dentry, NULL);
	if (error)
		return error;

	if (dir->i_sb != inode->i_sb)
		return -EXDEV;

	/*
	 * A link to an append-only or immutable file cannot be created.
	 */
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return -EPERM;
	if (!dir->i_op || !dir->i_op->link)
		return -EPERM;
	if (S_ISDIR(old_dentry->d_inode->i_mode))
		return -EPERM;

	error = security_inode_link(old_dentry, dir, new_dentry);
	if (error)
		return error;

	mutex_lock(&old_dentry->d_inode->i_mutex);
	DQUOT_INIT(dir);
	error = dir->i_op->link(old_dentry, dir, new_dentry);
	mutex_unlock(&old_dentry->d_inode->i_mutex);
	if (!error)
		fsnotify_create(dir, new_dentry->d_name.name);
	return error;
}

/*
 * Hardlinks are often used in delicate situations.  We avoid
 * security-related surprises by not following symlinks on the
 * newname.  --KAB
 *
 * We don't follow them on the oldname either to be compatible
 * with linux 2.0, and to avoid hard-linking to directories
 * and other special files.  --ADM
 */
asmlinkage long sys_linkat(int olddfd, const char __user *oldname,
			   int newdfd, const char __user *newname)
{
	struct dentry *new_dentry;
	struct nameidata nd, old_nd;
	int error;
	char * to;

	to = getname(newname);
	if (IS_ERR(to))
		return PTR_ERR(to);

	error = __user_walk_fd(olddfd, oldname, 0, &old_nd);
	if (error)
		goto exit;
	error = do_path_lookup(newdfd, to, LOOKUP_PARENT, &nd);
	if (error)
		goto out;
	error = -EXDEV;
	if (old_nd.mnt != nd.mnt)
		goto out_release;
	new_dentry = lookup_create(&nd, 0);
	error = PTR_ERR(new_dentry);
	if (!IS_ERR(new_dentry)) {
		error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
		dput(new_dentry);
	}
	mutex_unlock(&nd.dentry->d_inode->i_mutex);
out_release:
	path_release(&nd);
out:
	path_release(&old_nd);
exit:
	putname(to);

	return error;
}

asmlinkage long sys_link(const char __user *oldname, const char __user *newname)
{
	return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname);
}

/*
 * The worst of all namespace operations - renaming directory. "Perverted"
 * doesn't even start to describe it. Somebody in UCB had a heck of a trip...
 * Problems:
 *	a) we can get into loop creation. Check is done in is_subdir().
 *	b) race potential - two innocent renames can create a loop together.
 *	   That's where 4.4 screws up. Current fix: serialization on
 *	   sb->s_vfs_rename_sem. We might be more accurate, but that's another
 *	   story.
 *	c) we have to lock _three_ objects - parents and victim (if it exists).
 *	   And that - after we got ->i_mutex on parents (until then we don't know
 *	   whether the target exists).  Solution: try to be smart with locking
 *	   order for inodes.  We rely on the fact that tree topology may change
 *	   only under ->s_vfs_rename_sem _and_ that parent of the object we
 *	   move will be locked.  Thus we can rank directories by the tree
 *	   (ancestors first) and rank all non-directories after them.
 *	   That works since everybody except rename does "lock parent, lookup,
 *	   lock child" and rename is under ->s_vfs_rename_sem.
 *	   HOWEVER, it relies on the assumption that any object with ->lookup()
 *	   has no more than 1 dentry.  If "hybrid" objects will ever appear,
 *	   we'd better make sure that there's no link(2) for them.
 *	d) some filesystems don't support opened-but-unlinked directories,
 *	   either because of layout or because they are not ready to deal with
 *	   all cases correctly. The latter will be fixed (taking this sort of
 *	   stuff into VFS), but the former is not going away. Solution: the same
 *	   trick as in rmdir().
 *	e) conversion from fhandle to dentry may come in the wrong moment - when
 *	   we are removing the target. Solution: we will have to grab ->i_mutex
 *	   in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on
 *	   ->i_mutex on parents, which works but leads to some truly excessive
 *	   locking].
 */
static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = 0;
	struct inode *target;

	/*
	 * If we are going to change the parent - check write permissions,
	 * we'll need to flip '..'.
	 */
	if (new_dir != old_dir) {
		error = permission(old_dentry->d_inode, MAY_WRITE, NULL);
		if (error)
			return error;
	}

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	target = new_dentry->d_inode;
	if (target) {
		mutex_lock(&target->i_mutex);
		dentry_unhash(new_dentry);
	}
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		error = -EBUSY;
	else
		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (target) {
		if (!error)
			target->i_flags |= S_DEAD;
		mutex_unlock(&target->i_mutex);
		if (d_unhashed(new_dentry))
			d_rehash(new_dentry);
		dput(new_dentry);
	}
	if (!error)
		d_move(old_dentry, new_dentry);
	return error;
}

static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
			    struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *target;
	int error;

	error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
	if (error)
		return error;

	dget(new_dentry);
	target = new_dentry->d_inode;
	if (target)
		mutex_lock(&target->i_mutex);
	if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry))
		error = -EBUSY;
	else
		error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (!error) {
		/* The following d_move() should become unconditional */
		if (!(old_dir->i_sb->s_type->fs_flags & FS_ODD_RENAME))
			d_move(old_dentry, new_dentry);
	}
	if (target)
		mutex_unlock(&target->i_mutex);
	dput(new_dentry);
	return error;
}

int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
	       struct inode *new_dir, struct dentry *new_dentry)
{
	int error;
	int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
	const char *old_name;

	if (old_dentry->d_inode == new_dentry->d_inode)
		return 0;

	error = may_delete(old_dir, old_dentry, is_dir);
	if (error)
		return error;

	if (!new_dentry->d_inode)
		error = may_create(new_dir, new_dentry, NULL);
	else
		error = may_delete(new_dir, new_dentry, is_dir);
	if (error)
		return error;

	if (!old_dir->i_op || !old_dir->i_op->rename)
		return -EPERM;

	DQUOT_INIT(old_dir);
	DQUOT_INIT(new_dir);

	old_name = fsnotify_oldname_init(old_dentry->d_name.name);

	if (is_dir)
		error = vfs_rename_dir(old_dir, old_dentry, new_dir, new_dentry);
	else
		error = vfs_rename_other(old_dir, old_dentry, new_dir, new_dentry);
	if (!error) {
		const char *new_name = old_dentry->d_name.name;
		fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
			      new_dentry->d_inode, old_dentry->d_inode);
	}
	fsnotify_oldname_free(old_name);

	return error;
}

static int do_rename(int olddfd, const char *oldname, int newdfd,
			const char *newname)
{
	int error = 0;
	struct dentry * old_dir, * new_dir;
	struct dentry * old_dentry, *new_dentry;
	struct dentry * trap;
	struct nameidata oldnd, newnd;

	error = do_path_lookup(olddfd, oldname, LOOKUP_PARENT, &oldnd);
	if (error)
		goto exit;

	error = do_path_lookup(newdfd, newname, LOOKUP_PARENT, &newnd);
	if (error)
		goto exit1;

	error = -EXDEV;
	if (oldnd.mnt != newnd.mnt)
		goto exit2;

	old_dir = oldnd.dentry;
	error = -EBUSY;
	if (oldnd.last_type != LAST_NORM)
		goto exit2;

	new_dir = newnd.dentry;
	if (newnd.last_type != LAST_NORM)
		goto exit2;

	trap = lock_rename(new_dir, old_dir);

	old_dentry = lookup_hash(&oldnd);
	error = PTR_ERR(old_dentry);
	if (IS_ERR(old_dentry))
		goto exit3;
	/* source must exist */
	error = -ENOENT;
	if (!old_dentry->d_inode)
		goto exit4;
	/* unless the source is a directory trailing slashes give -ENOTDIR */
	if (!S_ISDIR(old_dentry->d_inode->i_mode)) {
		error = -ENOTDIR;
		if (oldnd.last.name[oldnd.last.len])
			goto exit4;
		if (newnd.last.name[newnd.last.len])
			goto exit4;
	}
	/* source should not be ancestor of target */
	error = -EINVAL;
	if (old_dentry == trap)
		goto exit4;
	new_dentry = lookup_hash(&newnd);
	error = PTR_ERR(new_dentry);
	if (IS_ERR(new_dentry))
		goto exit4;
	/* target should not be an ancestor of source */
	error = -ENOTEMPTY;
	if (new_dentry == trap)
		goto exit5;

	error = vfs_rename(old_dir->d_inode, old_dentry,
				   new_dir->d_inode, new_dentry);
exit5:
	dput(new_dentry);
exit4:
	dput(old_dentry);
exit3:
	unlock_rename(new_dir, old_dir);
exit2:
	path_release(&newnd);
exit1:
	path_release(&oldnd);
exit:
	return error;
}

asmlinkage long sys_renameat(int olddfd, const char __user *oldname,
			     int newdfd, const char __user *newname)
{
	int error;
	char * from;
	char * to;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
	to = getname(newname);
	error = PTR_ERR(to);
	if (!IS_ERR(to)) {
		error = do_rename(olddfd, from, newdfd, to);
		putname(to);
	}
	putname(from);
	return error;
}

asmlinkage long sys_rename(const char __user *oldname, const char __user *newname)
{
	return sys_renameat(AT_FDCWD, oldname, AT_FDCWD, newname);
}

int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
{
	int len;

	len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}

/*
 * A helper for ->readlink().  This should be used *ONLY* for symlinks that
 * have ->follow_link() touching nd only in nd_set_link().  Using (or not
 * using) it for any given inode is up to filesystem.
 */
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct nameidata nd;
	void *cookie;

	nd.depth = 0;
	cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
	if (!IS_ERR(cookie)) {
		int res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
		if (dentry->d_inode->i_op->put_link)
			dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
		cookie = ERR_PTR(res);
	}
	return PTR_ERR(cookie);
}

int vfs_follow_link(struct nameidata *nd, const char *link)
{
	return __vfs_follow_link(nd, link);
}

/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
	struct page * page;
	struct address_space *mapping = dentry->d_inode->i_mapping;
	page = read_cache_page(mapping, 0, (filler_t *)mapping->a_ops->readpage,
				NULL);
	if (IS_ERR(page))
		goto sync_fail;
	wait_on_page_locked(page);
	if (!PageUptodate(page))
		goto async_fail;
	*ppage = page;
	return kmap(page);

async_fail:
	page_cache_release(page);
	return ERR_PTR(-EIO);

sync_fail:
	return (char*)page;
}

int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	struct page *page = NULL;
	char *s = page_getlink(dentry, &page);
	int res = vfs_readlink(dentry, buffer, buflen, s);
	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
	return res;
}

void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	nd_set_link(nd, page_getlink(dentry, &page));
	return page;
}

void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	struct page *page = cookie;

	if (page) {
		kunmap(page);
		page_cache_release(page);
	}
}

int page_symlink(struct inode *inode, const char *symname, int len)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page = grab_cache_page(mapping, 0);
	int err = -ENOMEM;
	char *kaddr;

	if (!page)
		goto fail;
	err = mapping->a_ops->prepare_write(NULL, page, 0, len-1);
	if (err)
		goto fail_map;
	kaddr = kmap_atomic(page, KM_USER0);
	memcpy(kaddr, symname, len-1);
	kunmap_atomic(kaddr, KM_USER0);
	mapping->a_ops->commit_write(NULL, page, 0, len-1);
	/*
	 * Notice that we are _not_ going to block here - end of page is
	 * unmapped, so this will only try to map the rest of page, see
	 * that it is unmapped (typically even will not look into inode -
	 * ->i_size will be enough for everything) and zero it out.
	 * OTOH it's obviously correct and should make the page up-to-date.
	 */
	if (!PageUptodate(page)) {
		err = mapping->a_ops->readpage(NULL, page);
		wait_on_page_locked(page);
	} else {
		unlock_page(page);
	}
	page_cache_release(page);
	if (err < 0)
		goto fail;
	mark_inode_dirty(inode);
	return 0;
fail_map:
	unlock_page(page);
	page_cache_release(page);
fail:
	return err;
}

struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= page_follow_link_light,
	.put_link	= page_put_link,
};

EXPORT_SYMBOL(__user_walk);
EXPORT_SYMBOL(__user_walk_fd);
EXPORT_SYMBOL(follow_down);
EXPORT_SYMBOL(follow_up);
EXPORT_SYMBOL(get_write_access); /* binfmt_aout */
EXPORT_SYMBOL(getname);
EXPORT_SYMBOL(lock_rename);
EXPORT_SYMBOL(lookup_hash);
EXPORT_SYMBOL(lookup_one_len);
EXPORT_SYMBOL(page_follow_link_light);
EXPORT_SYMBOL(page_put_link);
EXPORT_SYMBOL(page_readlink);
EXPORT_SYMBOL(page_symlink);
EXPORT_SYMBOL(page_symlink_inode_operations);
EXPORT_SYMBOL(path_lookup);
EXPORT_SYMBOL(path_release);
EXPORT_SYMBOL(path_walk);
EXPORT_SYMBOL(permission);
EXPORT_SYMBOL(vfs_permission);
EXPORT_SYMBOL(file_permission);
EXPORT_SYMBOL(unlock_rename);
EXPORT_SYMBOL(vfs_create);
EXPORT_SYMBOL(vfs_follow_link);
EXPORT_SYMBOL(vfs_link);
EXPORT_SYMBOL(vfs_mkdir);
EXPORT_SYMBOL(vfs_mknod);
EXPORT_SYMBOL(generic_permission);
EXPORT_SYMBOL(vfs_readlink);
EXPORT_SYMBOL(vfs_rename);
EXPORT_SYMBOL(vfs_rmdir);
EXPORT_SYMBOL(vfs_symlink);
EXPORT_SYMBOL(vfs_unlink);
EXPORT_SYMBOL(dentry_unhash);
EXPORT_SYMBOL(generic_readlink);
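/*
 * Illustrative sketch (not part of fs/namei.c): how a filesystem that keeps
 * symlink targets in the page cache would typically use the exported helpers
 * above - it points ->i_op at page_symlink_inode_operations and writes the
 * target with page_symlink(), as ext2-style filesystems do.  The helpers
 * my_fs_new_inode(), my_fs_add_link() and my_fs_aops are hypothetical names
 * standing in for the filesystem's own inode-allocation, directory-entry and
 * address_space_operations code; only the VFS calls are real.
 */
#if 0	/* example only - never compiled */
static int my_fs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct inode *inode;
	int err;
	int l = strlen(symname) + 1;	/* page_symlink() length includes the NUL */

	inode = my_fs_new_inode(dir, S_IFLNK | S_IRWXUGO);	/* hypothetical */
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	/* Reuse the generic pagecache-backed symlink methods defined above. */
	inode->i_op = &page_symlink_inode_operations;
	inode->i_mapping->a_ops = &my_fs_aops;			/* hypothetical */

	/* Store the link target in page 0 of the symlink's pagecache. */
	err = page_symlink(inode, symname, l);
	if (err) {
		iput(inode);
		return err;
	}

	/* Hook the new inode into the parent directory (hypothetical helper). */
	err = my_fs_add_link(dentry, inode);
	if (err)
		iput(inode);
	return err;
}
#endif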