/*
 * bios-less APM driver for ARM Linux 
 *  Jamey Hicks <jamey@crl.dec.com>
 *  adapted from the APM BIOS driver for Linux by Stephen Rothwell (sfr@linuxcare.com)
 *
 * APM 1.2 Reference:
 *   Intel Corporation, Microsoft Corporation. Advanced Power Management
 *   (APM) BIOS Interface Specification, Revision 1.2, February 1996.
 *
 * [This document is available from Microsoft at:
 *    http://www.microsoft.com/hwdev/busbios/amp_12.htm]
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/apm_bios.h>
#include <linux/capability.h>
#include <linux/sched.h>
#include <linux/pm.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/completion.h>

#include <asm/apm.h> /* apm_power_info */
#include <asm/system.h>

/*
 * The apm_bios device is one of the misc char devices.
 * This is its minor number.
 */
#define APM_MINOR_DEV	134

/*
 * See Documentation/Config.help for the configuration options.
 *
 * Various options can be changed at boot time as follows:
 * (We allow underscores for compatibility with the modules code)
 *	apm=on/off			enable/disable APM
 */

/*
 * Maximum number of events stored
 */
#define APM_MAX_EVENTS		16

struct apm_queue {
	unsigned int		event_head;
	unsigned int		event_tail;
	apm_event_t		events[APM_MAX_EVENTS];
};

/*
 * The per-file APM data
 */
struct apm_user {
	struct list_head	list;

	unsigned int		suser: 1;
	unsigned int		writer: 1;
	unsigned int		reader: 1;

	int			suspend_result;
	unsigned int		suspend_state;
#define SUSPEND_NONE	0		/* no suspend pending */
#define SUSPEND_PENDING	1		/* suspend pending read */
#define SUSPEND_READ	2		/* suspend read, pending ack */
#define SUSPEND_ACKED	3		/* suspend acked */
#define SUSPEND_DONE	4		/* suspend completed */

	struct apm_queue	queue;
};

/*
 * Local variables
 */
static int suspends_pending;
static int apm_disabled;
static int arm_apm_active;

static DECLARE_WAIT_QUEUE_HEAD(apm_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(apm_suspend_waitqueue);

/*
 * This is a list of everyone who has opened /dev/apm_bios
 */
static DECLARE_RWSEM(user_list_lock);
static LIST_HEAD(apm_user_list);

/*
 * kapmd info.  kapmd provides us with a process context to handle
 * "APM" events within - specifically necessary if we're going
 * to be suspending the system.
 */
static DECLARE_WAIT_QUEUE_HEAD(kapmd_wait);
static DECLARE_COMPLETION(kapmd_exit);
static DEFINE_SPINLOCK(kapmd_queue_lock);
static struct apm_queue kapmd_queue;


static const char driver_version[] = "1.13";	/* no spaces */



/*
 * Compatibility cruft until the IPAQ people move over to the new
 * interface.
 */
static void __apm_get_power_status(struct apm_power_info *info)
{
}

/*
 * This allows machines to provide their own "apm get power status" function.
 */
void (*apm_get_power_status)(struct apm_power_info *) = __apm_get_power_status;
EXPORT_SYMBOL(apm_get_power_status);
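
/*
 * Illustrative sketch (not from this file): machine support code
 * typically installs its own callback during board initialization,
 * filling in the apm_power_info fields it knows about (units == 0
 * selects minutes, see apm_get_info() below).  The mymachine_* names
 * are hypothetical placeholders:
 *
 *	static void mymachine_get_power_status(struct apm_power_info *info)
 *	{
 *		info->ac_line_status = mymachine_on_ac() ? 0x01 : 0x00;
 *		info->battery_life   = mymachine_battery_percent();
 *		info->units          = 0;
 *		info->time           = mymachine_battery_minutes();
 *	}
 *
 *	apm_get_power_status = mymachine_get_power_status;
 */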


/*
 * APM event queue management.
 */
static inline int queue_empty(struct apm_queue *q)
{
	return q->event_head == q->event_tail;
}

static inline apm_event_t queue_get_event(struct apm_queue *q)
{
	q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	return q->events[q->event_tail];
}

static void queue_add_event(struct apm_queue *q, apm_event_t event)
{
	q->event_head = (q->event_head + 1) % APM_MAX_EVENTS;
	if (q->event_head == q->event_tail) {
		static int notified;

		if (notified++ == 0)
			printk(KERN_ERR "apm: an event queue overflowed\n");
		q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS;
	}
	q->events[q->event_head] = event;
}
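
/*
 * Queue convention: event_head is advanced before a slot is written and
 * event_tail is advanced before a slot is read, so the slot at event_tail
 * is never live.  The queue therefore holds at most APM_MAX_EVENTS - 1
 * events; once full, queue_add_event() drops the oldest event as shown
 * above.
 */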

static void queue_event_one_user(struct apm_user *as, apm_event_t event)
{
	if (as->suser && as->writer) {
		switch (event) {
		case APM_SYS_SUSPEND:
		case APM_USER_SUSPEND:
			/*
			 * If this user already has a suspend pending,
			 * don't queue another one.
			 */
			if (as->suspend_state != SUSPEND_NONE)
				return;

			as->suspend_state = SUSPEND_PENDING;
			suspends_pending++;
			break;
		}
	}
	queue_add_event(&as->queue, event);
}

static void queue_event(apm_event_t event, struct apm_user *sender)
{
	struct apm_user *as;

	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		if (as != sender && as->reader)
			queue_event_one_user(as, event);
	}
	up_read(&user_list_lock);
	wake_up_interruptible(&apm_waitqueue);
}

static void apm_suspend(void)
{
	struct apm_user *as;
	int err = pm_suspend(PM_SUSPEND_MEM);

	/*
	 * Anyone on the APM queues will think we're still suspended.
	 * Send a message so everyone knows we're now awake again.
	 */
	queue_event(APM_NORMAL_RESUME, NULL);

	/*
	 * Finally, wake up anyone who is sleeping on the suspend.
	 */
	down_read(&user_list_lock);
	list_for_each_entry(as, &apm_user_list, list) {
		as->suspend_result = err;
		as->suspend_state = SUSPEND_DONE;
	}
	up_read(&user_list_lock);

	wake_up(&apm_suspend_waitqueue);
}

static ssize_t apm_read(struct file *fp, char __user *buf, size_t count, loff_t *ppos)
{
	struct apm_user *as = fp->private_data;
	apm_event_t event;
	int i = count, ret = 0;

	if (count < sizeof(apm_event_t))
		return -EINVAL;

	if (queue_empty(&as->queue) && (fp->f_flags & O_NONBLOCK))
		return -EAGAIN;

	wait_event_interruptible(apm_waitqueue, !queue_empty(&as->queue));

	while ((i >= sizeof(event)) && !queue_empty(&as->queue)) {
		event = queue_get_event(&as->queue);

		ret = -EFAULT;
		if (copy_to_user(buf, &event, sizeof(event)))
			break;

		if (event == APM_SYS_SUSPEND || event == APM_USER_SUSPEND)
			as->suspend_state = SUSPEND_READ;

		buf += sizeof(event);
		i -= sizeof(event);
	}

	if (i < count)
		ret = count - i;

	return ret;
}

static unsigned int apm_poll(struct file *fp, poll_table * wait)
{
	struct apm_user *as = fp->private_data;

	poll_wait(fp, &apm_waitqueue, wait);
	return queue_empty(&as->queue) ? 0 : POLLIN | POLLRDNORM;
}

/*
 * apm_ioctl - handle APM ioctl
 *
 * APM_IOC_SUSPEND
 *   This IOCTL is overloaded, and performs two functions.  It is used to:
 *     - initiate a suspend
 *     - acknowledge a suspend read from /dev/apm_bios.
 *   Only when everyone who has opened /dev/apm_bios with write permission
 *   has acknowledged does the actual suspend happen.
 */
static int
apm_ioctl(struct inode * inode, struct file *filp, u_int cmd, u_long arg)
{
	struct apm_user *as = filp->private_data;
	unsigned long flags;
	int err = -EINVAL;

	if (!as->suser || !as->writer)
		return -EPERM;

	switch (cmd) {
	case APM_IOC_SUSPEND:
		as->suspend_result = -EINTR;

		if (as->suspend_state == SUSPEND_READ) {
			/*
			 * If we read a suspend command from /dev/apm_bios,
			 * then the corresponding APM_IOC_SUSPEND ioctl is
			 * interpreted as an acknowledge.
			 */
			as->suspend_state = SUSPEND_ACKED;
			suspends_pending--;
		} else {
			/*
			 * Otherwise it is a request to suspend the system.
			 * Queue an event for all readers, and expect an
			 * acknowledge from all writers who haven't already
			 * acknowledged.
			 */
			queue_event(APM_USER_SUSPEND, as);
		}

		/*
		 * If there are no further acknowledges required, suspend
		 * the system.
		 */
		if (suspends_pending == 0)
			apm_suspend();

		/*
		 * Wait for the suspend/resume to complete.  If there are
		 * pending acknowledges, we wait here for them.
		 *
		 * Note that we need to ensure that the PM subsystem does
		 * not kick us out of the wait when it suspends the threads.
		 */
		flags = current->flags;
		current->flags |= PF_NOFREEZE;

		/*
		 * Note: do not allow a thread which is acking the suspend
		 * to escape until the resume is complete.
		 */
		if (as->suspend_state == SUSPEND_ACKED)
			wait_event(apm_suspend_waitqueue,
					 as->suspend_state == SUSPEND_DONE);
		else
			wait_event_interruptible(apm_suspend_waitqueue,
					 as->suspend_state == SUSPEND_DONE);

		current->flags = flags;
		err = as->suspend_result;
		as->suspend_state = SUSPEND_NONE;
		break;
	}

	return err;
}
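
/*
 * Illustrative user-space sketch of the protocol described above
 * (assumes /dev/apm_bios opened read-write by a privileged process;
 * error handling omitted):
 *
 *	int fd = open("/dev/apm_bios", O_RDWR);
 *	apm_event_t ev;
 *
 *	read(fd, &ev, sizeof(ev));
 *	if (ev == APM_SYS_SUSPEND || ev == APM_USER_SUSPEND)
 *		ioctl(fd, APM_IOC_SUSPEND);
 *
 * The read() blocks until an event is queued; the ioctl() acknowledges
 * a suspend that was read and does not return until the resume has
 * completed.  Calling APM_IOC_SUSPEND with no suspend event pending
 * instead requests a suspend, as handled in apm_ioctl() above.
 */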

static int apm_release(struct inode * inode, struct file * filp)
{
	struct apm_user *as = filp->private_data;
	filp->private_data = NULL;

	down_write(&user_list_lock);
	list_del(&as->list);
	up_write(&user_list_lock);

	/*
	 * We are now unhooked from the chain.  As far as new
	 * events are concerned, we no longer exist.  However, we
	 * need to balance suspends_pending, which means the
	 * possibility of sleeping.
	 */
	if (as->suspend_state != SUSPEND_NONE) {
		suspends_pending -= 1;
		if (suspends_pending == 0)
			apm_suspend();
	}

	kfree(as);
	return 0;
}

static int apm_open(struct inode * inode, struct file * filp)
{
	struct apm_user *as;

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (as) {
		/*
		 * XXX - this is a tiny bit broken, when we consider BSD
		 * process accounting. If the device is opened by root, we
		 * instantly flag that we used superuser privs. Who knows,
		 * we might close the device immediately without doing a
		 * privileged operation -- cevans
		 */
		as->suser = capable(CAP_SYS_ADMIN);
		as->writer = (filp->f_mode & FMODE_WRITE) == FMODE_WRITE;
		as->reader = (filp->f_mode & FMODE_READ) == FMODE_READ;

		down_write(&user_list_lock);
		list_add(&as->list, &apm_user_list);
		up_write(&user_list_lock);

		filp->private_data = as;
	}

	return as ? 0 : -ENOMEM;
}

static struct file_operations apm_bios_fops = {
	.owner		= THIS_MODULE,
	.read		= apm_read,
	.poll		= apm_poll,
	.ioctl		= apm_ioctl,
	.open		= apm_open,
	.release	= apm_release,
};

static struct miscdevice apm_device = {
	.minor		= APM_MINOR_DEV,
	.name		= "apm_bios",
	.fops		= &apm_bios_fops
};


#ifdef CONFIG_PROC_FS
/*
 * Arguments, with symbols from linux/apm_bios.h.
 *
 *   0) Linux driver version (this will change if format changes)
 *   1) APM BIOS Version.  Usually 1.0, 1.1 or 1.2.
 *   2) APM flags from APM Installation Check (0x00):
 *	bit 0: APM_16_BIT_SUPPORT
 *	bit 1: APM_32_BIT_SUPPORT
 *	bit 2: APM_IDLE_SLOWS_CLOCK
 *	bit 3: APM_BIOS_DISABLED
 *	bit 4: APM_BIOS_DISENGAGED
 *   3) AC line status
 *	0x00: Off-line
 *	0x01: On-line
 *	0x02: On backup power (BIOS >= 1.1 only)
 *	0xff: Unknown
 *   4) Battery status
 *	0x00: High
 *	0x01: Low
 *	0x02: Critical
 *	0x03: Charging
 *	0x04: Selected battery not present (BIOS >= 1.2 only)
 *	0xff: Unknown
 *   5) Battery flag
 *	bit 0: High
 *	bit 1: Low
 *	bit 2: Critical
 *	bit 3: Charging
 *	bit 7: No system battery
 *	0xff: Unknown
 *   6) Remaining battery life (percentage of charge):
 *	0-100: valid
 *	-1: Unknown
 *   7) Remaining battery life (time units):
 *	Number of remaining minutes or seconds
 *	-1: Unknown
 *   8) min = minutes; sec = seconds
 */
static int apm_get_info(char *buf, char **start, off_t fpos, int length)
{
	struct apm_power_info info;
	char *units;
	int ret;

	info.ac_line_status = 0xff;
	info.battery_status = 0xff;
	info.battery_flag   = 0xff;
	info.battery_life   = -1;
	info.time	    = -1;
	info.units	    = -1;

	if (apm_get_power_status)
		apm_get_power_status(&info);

	switch (info.units) {
	default:	units = "?";	break;
	case 0: 	units = "min";	break;
	case 1: 	units = "sec";	break;
	}

	ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
		     driver_version, APM_32_BIT_SUPPORT,
		     info.ac_line_status, info.battery_status,
		     info.battery_flag, info.battery_life,
		     info.time, units);

	return ret;
}
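
/*
 * An illustrative /proc/apm line produced by the sprintf() above
 * (values are made up):
 *
 *	1.13 1.2 0x02 0x01 0x03 0x08 87% 342 min
 *
 * i.e. driver 1.13, APM 1.2, 32-bit support, AC on-line, battery
 * charging (status 0x03, flag bit 3), 87% charge, 342 minutes left.
 */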
#endif

static int kapmd(void *arg)
{
	daemonize("kapmd");
	current->flags |= PF_NOFREEZE;

	do {
		apm_event_t event;

		wait_event_interruptible(kapmd_wait,
				!queue_empty(&kapmd_queue) || !arm_apm_active);

		if (!arm_apm_active)
			break;

		spin_lock_irq(&kapmd_queue_lock);
		event = 0;
		if (!queue_empty(&kapmd_queue))
			event = queue_get_event(&kapmd_queue);
		spin_unlock_irq(&kapmd_queue_lock);

		switch (event) {
		case 0:
			break;

		case APM_LOW_BATTERY:
		case APM_POWER_STATUS_CHANGE:
			queue_event(event, NULL);
			break;

		case APM_USER_SUSPEND:
		case APM_SYS_SUSPEND:
			queue_event(event, NULL);
			if (suspends_pending == 0)
				apm_suspend();
			break;

		case APM_CRITICAL_SUSPEND:
			apm_suspend();
			break;
		}
	} while (1);

	complete_and_exit(&kapmd_exit, 0);
}

static int __init apm_init(void)
{
	int ret;

	if (apm_disabled) {
		printk(KERN_NOTICE "apm: disabled on user request.\n");
		return -ENODEV;
	}

	arm_apm_active = 1;

	ret = kernel_thread(kapmd, NULL, CLONE_KERNEL);
	if (ret < 0) {
		arm_apm_active = 0;
		return ret;
	}

#ifdef CONFIG_PROC_FS
	create_proc_info_entry("apm", 0, NULL, apm_get_info);
#endif

	ret = misc_register(&apm_device);
	if (ret != 0) {
		remove_proc_entry("apm", NULL);

		arm_apm_active = 0;
		wake_up(&kapmd_wait);
		wait_for_completion(&kapmd_exit);
	}

	return ret;
}

static void __exit apm_exit(void)
{
	misc_deregister(&apm_device);
	remove_proc_entry("apm", NULL);

	arm_apm_active = 0;
	wake_up(&kapmd_wait);
	wait_for_completion(&kapmd_exit);
}

module_init(apm_init);
module_exit(apm_exit);

MODULE_AUTHOR("Stephen Rothwell");
MODULE_DESCRIPTION("Advanced Power Management");
MODULE_LICENSE("GPL");

#ifndef MODULE
static int __init apm_setup(char *str)
{
	while ((str != NULL) && (*str != '\0')) {
		if (strncmp(str, "off", 3) == 0)
			apm_disabled = 1;
		if (strncmp(str, "on", 2) == 0)
			apm_disabled = 0;
		str = strchr(str, ',');
		if (str != NULL)
			str += strspn(str, ", \t");
	}
	return 1;
}

__setup("apm=", apm_setup);
#endif

/**
 * apm_queue_event - queue an APM event for kapmd
 * @event: APM event
 *
 * Queue an APM event for kapmd to process and ultimately take the
 * appropriate action.  Only a subset of events are handled:
 *   %APM_LOW_BATTERY
 *   %APM_POWER_STATUS_CHANGE
 *   %APM_USER_SUSPEND
 *   %APM_SYS_SUSPEND
 *   %APM_CRITICAL_SUSPEND
 */
void apm_queue_event(apm_event_t event)
{
	unsigned long flags;

	spin_lock_irqsave(&kapmd_queue_lock, flags);
	queue_add_event(&kapmd_queue, event);
	spin_unlock_irqrestore(&kapmd_queue_lock, flags);

	wake_up_interruptible(&kapmd_wait);
}
EXPORT_SYMBOL(apm_queue_event);
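
/*
 * Illustrative sketch: machine support code typically calls
 * apm_queue_event() from an interrupt handler when a power event is
 * detected.  The handler name below is hypothetical:
 *
 *	static irqreturn_t battery_low_irq(int irq, void *dev_id)
 *	{
 *		apm_queue_event(APM_LOW_BATTERY);
 *		return IRQ_HANDLED;
 *	}
 *
 * This is safe from IRQ context: the queue is protected by
 * spin_lock_irqsave() and the waiter is woken with
 * wake_up_interruptible().
 */
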
/*
 * NET4:	Implementation of BSD Unix domain sockets.
 *
 * Authors:	Alan Cox, <alan.cox@linux.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Version:	$Id: af_unix.c,v 1.133 2002/02/08 03:57:19 davem Exp $
 *
 * Fixes:
 *		Linus Torvalds	:	Assorted bug cures.
 *		Niibe Yutaka	:	async I/O support.
 *		Carsten Paeth	:	PF_UNIX check, address fixes.
 *		Alan Cox	:	Limit size of allocated blocks.
 *		Alan Cox	:	Fixed the stupid socketpair bug.
 *		Alan Cox	:	BSD compatibility fine tuning.
 *		Alan Cox	:	Fixed a bug in connect when interrupted.
 *		Alan Cox	:	Sorted out a proper draft version of
 *					file descriptor passing hacked up from
 *					Mike Shaver's work.
 *		Marty Leisner	:	Fixes to fd passing
 *		Nick Nevin	:	recvmsg bugfix.
 *		Alan Cox	:	Started proper garbage collector
 *		Heiko Eißfeldt	:	Missing verify_area check
 *		Alan Cox	:	Started POSIXisms
 *		Andreas Schwab	:	Replace inode by dentry for proper
 *					reference counting
 *		Kirk Petersen	:	Made this a module
 *	    Christoph Rohland	:	Elegant non-blocking accept/connect algorithm.
 *					Lots of bug fixes.
 *	     Alexey Kuznetsov	:	Repaired (I hope) bugs introduced
 *					by the above two patches.
 *	     Andrea Arcangeli	:	If possible we block in connect(2)
 *					if the max backlog of the listen socket
 *					has been reached. This won't break
 *					old apps and it avoids hashing a huge
 *					number of socks (for unix_gc()
 *					performance reasons).
 *					Security fix that limits the max
 *					number of socks to 2*max_files and
 *					the number of skbs queueable in the
 *					dgram receiver.
 *		Artur Skawina   :	Hash function optimizations
 *	     Alexey Kuznetsov   :	Full scale SMP. Lots of bugs are introduced 8)
 *	      Malcolm Beattie   :	Set peercred for socketpair
 *	     Michal Ostrowski   :       Module initialization cleanup.
 *	     Arnaldo C. Melo	:	Remove MOD_{INC,DEC}_USE_COUNT,
 *	     				the core infrastructure is doing that
 *	     				for all net proto families now (2.5.69+)
 *
 *
 * Known differences from reference BSD that was tested:
 *
 *	[TO FIX]
 *	ECONNREFUSED is not returned from one end of a connected() socket to the
 *		other the moment one end closes.
 *	fstat() doesn't return st_dev=0, give the blksize as the high water
 *		mark, or use a fake inode identifier (nor does it have the BSD
 *		first-socket-fstat-twice bug).
 *	[NOT TO FIX]
 *	accept() returns a path name even if the connecting socket has closed
 *		in the meantime (BSD loses the path and gives up).
 *	accept() returns 0 length path for an unbound connector. BSD returns 16
 *		and a null first byte in the path (but not for gethost/peername - BSD bug ??)
 *	socketpair(...SOCK_RAW..) doesn't panic the kernel.
 *	BSD af_unix apparently has connect forgetting to block properly.
 *		(need to check this with the POSIX spec in detail)
 *
 * Differences from 2.0.0-11-... (ANK)
 *	Bug fixes and improvements.
 *		- client shutdown killed server socket.
 *		- removed all useless cli/sti pairs.
 *
 *	Semantic changes/extensions.
 *		- generic control message passing.
 *		- SCM_CREDENTIALS control message.
 *		- "Abstract" (not FS based) socket bindings.
 *		  Abstract names are sequences of bytes (not zero terminated)
 *		  starting with a zero byte, so that this name space does not
 *		  intersect with BSD names.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>

int sysctl_unix_max_dgram_qlen __read_mostly = 10;

static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
static atomic_t unix_nr_socks = ATOMIC_INIT(0);

#define unix_sockets_unbound	(&unix_socket_table[UNIX_HASH_SIZE])

#define UNIX_ABSTRACT(sk)	(unix_sk(sk)->addr->hash != UNIX_HASH_SIZE)

static struct sock *first_unix_socket(int *i)
{
	for (*i = 0; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

static struct sock *next_unix_socket(int *i, struct sock *s)
{
	struct sock *next = sk_next(s);
	/* More in this chain? */
	if (next)
		return next;
	/* Look for next non-empty chain. */
	for ((*i)++; *i <= UNIX_HASH_SIZE; (*i)++) {
		if (!hlist_empty(&unix_socket_table[*i]))
			return __sk_head(&unix_socket_table[*i]);
	}
	return NULL;
}

#define forall_unix_sockets(i, s) \
	for (s = first_unix_socket(&(i)); s; s = next_unix_socket(&(i),(s)))
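
/*
 * Illustrative use of the iterator above; do_something_with() is a
 * hypothetical placeholder, and the caller is expected to hold
 * unix_table_lock while iterating:
 *
 *	int i;
 *	struct sock *s;
 *
 *	spin_lock(&unix_table_lock);
 *	forall_unix_sockets(i, s)
 *		do_something_with(s);
 *	spin_unlock(&unix_table_lock);
 */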

#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	memcpy(UNIXSID(skb), &scm->secid, sizeof(u32));
}

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
	scm->secid = *UNIXSID(skb);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }

static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
#endif /* CONFIG_SECURITY_NETWORK */

/*
 *  SMP locking strategy:
 *    hash table is protected with spinlock unix_table_lock
 *    each socket state is protected by separate rwlock.
 */

static inline unsigned unix_hash_fold(__wsum n)
{
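	/* fold all 32 checksum bits down before masking to a table slot */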
	unsigned hash = (__force unsigned)n;
	hash ^= hash>>16;
	hash ^= hash>>8;
	return hash&(UNIX_HASH_SIZE-1);
}

#define unix_peer(sk) (unix_sk(sk)->peer)

static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
	return unix_peer(osk) == sk;
}

static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
	return (unix_peer(osk) == NULL || unix_our_peer(sk, osk));
}

static struct sock *unix_peer_get(struct sock *s)
{
	struct sock *peer;

	unix_state_lock(s);
	peer = unix_peer(s);
	if (peer)
		sock_hold(peer);
	unix_state_unlock(s);
	return peer;
}

static inline void unix_release_addr(struct unix_address *addr)
{
	if (atomic_dec_and_test(&addr->refcnt))
		kfree(addr);
}

/*
 *	Check unix socket name:
 *		- it should not be zero length.
 *		- if it does not start with a zero byte, it should be
 *		  NUL-terminated (a filesystem object)
 *		- if it starts with a zero byte, it is an abstract name.
 */

static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned *hashp)
{
	if (len <= sizeof(short) || len > sizeof(*sunaddr))
		return -EINVAL;
	if (!sunaddr || sunaddr->sun_family != AF_UNIX)
		return -EINVAL;
	if (sunaddr->sun_path[0]) {
		/*
		 * This may look like an off-by-one error but it is a bit more
		 * subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] doesn't as such exist.  However, in kernel
		 * space we are guaranteed that it is a valid memory location
		 * in our kernel address buffer.
		 */
		((char *)sunaddr)[len] = 0;
		len = strlen(sunaddr->sun_path)+1+sizeof(short);
		return len;
	}

	*hashp = unix_hash_fold(csum_partial((char*)sunaddr, len, 0));
	return len;
}
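
/*
 * Illustrative user-space sketch of the two address forms handled by
 * unix_mkname().  A filesystem name is a NUL-terminated path; an
 * abstract name starts with a zero byte and is hashed from its bytes
 * by unix_hash_fold() above.  Lengths passed to bind() include
 * sun_family ("/tmp/mysock" and "mysock" are made-up names):
 *
 *	struct sockaddr_un a;
 *
 *	a.sun_family = AF_UNIX;
 *	strcpy(a.sun_path, "/tmp/mysock");
 *	bind(fd, (struct sockaddr *)&a,
 *	     sizeof(a.sun_family) + strlen(a.sun_path) + 1);
 *
 *	a.sun_family = AF_UNIX;
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "mysock", 6);
 *	bind(fd, (struct sockaddr *)&a, sizeof(a.sun_family) + 1 + 6);
 */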

static void __unix_remove_socket(struct sock *sk)
{
	sk_del_node_init(sk);
}

static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	BUG_TRAP(sk_unhashed(sk));
	sk_add_node(sk, list);
}

static inline void unix_remove_socket(struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_remove_socket(sk);
	spin_unlock(&unix_table_lock);
}

static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
	spin_lock(&unix_table_lock);
	__unix_insert_socket(list, sk);
	spin_unlock(&unix_table_lock);
}

static struct sock *__unix_find_socket_byname(struct sockaddr_un *sunname,
					      int len, int type, unsigned hash)
{
	struct sock *s;
	struct hlist_node *node;

	sk_for_each(s, node, &unix_socket_table[hash ^ type]) {
		struct unix_sock *u = unix_sk(s);

		if (u->addr->len == len &&
		    !memcmp(u->addr->name, sunname, len))
			goto found;
	}
	s = NULL;
found:
	return s;
}

static inline struct sock *unix_find_socket_byname(struct sockaddr_un *sunname,
						   int len, int type,
						   unsigned hash)
{
	struct sock *s;

	spin_lock(&unix_table_lock);
	s = __unix_find_socket_byname(sunname, len, type, hash);
	if (s)
		sock_hold(s);
	spin_unlock(&unix_table_lock);
	return s;
}

static struct sock *unix_find_socket_byinode(struct inode *i)
{
	struct sock *s;
	struct hlist_node *node;

	spin_lock(&unix_table_lock);
	sk_for_each(s, node,
		    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
		struct dentry *dentry = unix_sk(s)->dentry;

		if (dentry && dentry->d_inode == i) {
			sock_hold(s);
			goto found;
		}
	}
	s = NULL;
found:
	spin_unlock(&unix_table_lock);
	return s;
}

static inline int unix_writable(struct sock *sk)
{
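	/* writable while no more than a quarter of sk_sndbuf is allocated */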
	return (atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}

static void unix_write_space(struct sock *sk)
{
	read_lock(&sk->sk_callback_lock);
	if (unix_writable(sk)) {
		if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
			wake_up_interruptible_sync(sk->sk_sleep);
		sk_wake_async(sk, 2, POLL_OUT);
	}
	read_unlock(&sk->sk_callback_lock);
}

/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows us
 * to do flow control based only on wmem_alloc; second, an sk connected to a
 * peer may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
	if (!skb_queue_empty(&sk->sk_receive_queue)) {
		skb_queue_purge(&sk->sk_receive_queue);
		wake_up_interruptible_all(&unix_sk(sk)->peer_wait);

		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this when
		 * the peer was not connected to us.
		 */
		if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
			other->sk_err = ECONNRESET;
			other->sk_error_report(other);
		}
	}
}

static void unix_sock_destructor(struct sock *sk)
{
	struct unix_sock *u = unix_sk(sk);

	skb_queue_purge(&sk->sk_receive_queue);

	BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
	BUG_TRAP(sk_unhashed(sk));
	BUG_TRAP(!sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		printk("Attempt to release alive unix socket: %p\n", sk);
		return;
	}

	if (u->addr)
		unix_release_addr(u->addr);

	atomic_dec(&unix_nr_socks);
#ifdef UNIX_REFCNT_DEBUG
	printk(KERN_DEBUG "UNIX %p is destroyed, %d are still alive.\n", sk, atomic_read(&unix_nr_socks));
#endif
}

static int unix_release_sock(struct sock *sk, int embrion)
{
	struct unix_sock *u = unix_sk(sk);
	struct dentry *dentry;
	struct vfsmount *mnt;
	struct sock *skpair;
	struct sk_buff *skb;
	int state;

	unix_remove_socket(sk);

	/* Clear state */
	unix_state_lock(sk);
	sock_orphan(sk);
	sk->sk_shutdown = SHUTDOWN_MASK;
	dentry	     = u->dentry;
	u->dentry    = NULL;
	mnt	     = u->mnt;
	u->mnt	     = NULL;
	state = sk->sk_state;
	sk->sk_state = TCP_CLOSE;
	unix_state_unlock(sk);

	wake_up_interruptible_all(&u->peer_wait);

	skpair = unix_peer(sk);

	if (skpair != NULL) {
		if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
			unix_state_lock(skpair);
			/* No more writes */
			skpair->sk_shutdown = SHUTDOWN_MASK;
			if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
				skpair->sk_err = ECONNRESET;
			unix_state_unlock(skpair);
			skpair->sk_state_change(skpair);
			read_lock(&skpair->sk_callback_lock);
			sk_wake_async(skpair, 1, POLL_HUP);
			read_unlock(&skpair->sk_callback_lock);
		}
		sock_put(skpair); /* It may now die */
		unix_peer(sk) = NULL;
	}

	/* Try to flush out this socket. Throw out buffers at least */

	while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
		if (state == TCP_LISTEN)
			unix_release_sock(skb->sk, 1);
		/* passed fds are erased in the kfree_skb hook	      */
		kfree_skb(skb);
	}

	if (dentry) {
		dput(dentry);
		mntput(mnt);
	}

	sock_put(sk);

	/* ---- Socket is dead now and most probably destroyed ---- */

	/*
	 * Fixme: BSD difference: In BSD all sockets connected to us get
	 *	  ECONNRESET and we die on the spot. In Linux we behave
	 *	  like files and pipes do and wait for the last
	 *	  dereference.
	 *
	 * Can't we simply set sock->err?
	 *
	 *	  What is the above comment talking about? --ANK(980817)
	 */

	if (unix_tot_inflight)
		unix_gc();		/* Garbage collect fds */

	return 0;
}

static int unix_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);

	err = -EOPNOTSUPP;
	if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
		goto out;			/* Only stream/seqpacket sockets accept */
	err = -EINVAL;
	if (!u->addr)
		goto out;			/* No listens on an unbound socket */
	unix_state_lock(sk);
	if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
		goto out_unlock;
	if (backlog > sk->sk_max_ack_backlog)
		wake_up_interruptible_all(&u->peer_wait);
	sk->sk_max_ack_backlog	= backlog;
	sk->sk_state		= TCP_LISTEN;
	/* set credentials so connect can copy them */
	sk->sk_peercred.pid	= task_tgid_vnr(current);
	sk->sk_peercred.uid	= current->euid;
	sk->sk_peercred.gid	= current->egid;
	err = 0;

out_unlock:
	unix_state_unlock(sk);
out:
	return err;
}

static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
			       int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t);
static int unix_stream_recvmsg(struct kiocb *, struct socket *,
			       struct msghdr *, size_t, int);
static int unix_dgram_sendmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct kiocb *, struct socket *,
			      struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
			      int, int);
static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
				  struct msghdr *, size_t);

static const struct proto_ops unix_stream_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		unix_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_stream_sendmsg,
	.recvmsg =	unix_stream_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_dgram_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_dgram_connect,
	.socketpair =	unix_socketpair,
	.accept =	sock_no_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	sock_no_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_dgram_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static const struct proto_ops unix_seqpacket_ops = {
	.family =	PF_UNIX,
	.owner =	THIS_MODULE,
	.release =	unix_release,
	.bind =		unix_bind,
	.connect =	unix_stream_connect,
	.socketpair =	unix_socketpair,
	.accept =	unix_accept,
	.getname =	unix_getname,
	.poll =		datagram_poll,
	.ioctl =	unix_ioctl,
	.listen =	unix_listen,
	.shutdown =	unix_shutdown,
	.setsockopt =	sock_no_setsockopt,
	.getsockopt =	sock_no_getsockopt,
	.sendmsg =	unix_seqpacket_sendmsg,
	.recvmsg =	unix_dgram_recvmsg,
	.mmap =		sock_no_mmap,
	.sendpage =	sock_no_sendpage,
};

static struct proto unix_proto = {
	.name	  = "UNIX",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct unix_sock),
};

/*
 * AF_UNIX sockets do not interact with hardware, hence they
 * don't trigger interrupts - so it's safe for them to have
 * bh-unsafe locking for their sk_receive_queue.lock. Split off
 * this special lock-class by reinitializing the spinlock key:
 */
static struct lock_class_key af_unix_sk_receive_queue_lock_key;

static struct sock *unix_create1(struct net *net, struct socket *sock)
{
	struct sock *sk = NULL;
	struct unix_sock *u;

	atomic_inc(&unix_nr_socks);
	if (atomic_read(&unix_nr_socks) > 2 * get_max_files())
		goto out;

	sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto);
	if (!sk)
		goto out;

	sock_init_data(sock, sk);
	lockdep_set_class(&sk->sk_receive_queue.lock,
				&af_unix_sk_receive_queue_lock_key);

	sk->sk_write_space	= unix_write_space;
	sk->sk_max_ack_backlog	= sysctl_unix_max_dgram_qlen;
	sk->sk_destruct		= unix_sock_destructor;
	u	  = unix_sk(sk);
	u->dentry = NULL;
	u->mnt	  = NULL;
	spin_lock_init(&u->lock);
	atomic_set(&u->inflight, 0);
	INIT_LIST_HEAD(&u->link);
	mutex_init(&u->readlock); /* single task reading lock */
	init_waitqueue_head(&u->peer_wait);
	unix_insert_socket(unix_sockets_unbound, sk);
out:
	if (sk == NULL)
		atomic_dec(&unix_nr_socks);
	return sk;
}

static int unix_create(struct net *net, struct socket *sock, int protocol)
{
	if (net != &init_net)
		return -EAFNOSUPPORT;

	if (protocol && protocol != PF_UNIX)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &unix_stream_ops;
		break;
		/*
		 *	Believe it or not BSD has AF_UNIX, SOCK_RAW though
		 *	nothing uses it.
		 */
	case SOCK_RAW:
		sock->type = SOCK_DGRAM;
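		/* fall through */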
	case SOCK_DGRAM:
		sock->ops = &unix_dgram_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &unix_seqpacket_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	return unix_create1(net, sock) ? 0 : -ENOMEM;
}

static int unix_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (!sk)
		return 0;

	sock->sk = NULL;

	return unix_release_sock(sk, 0);
}

static int unix_autobind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct unix_sock *u = unix_sk(sk);
	static u32 ordernum = 1;
	struct unix_address *addr;
	int err;

	mutex_lock(&u->readlock);

	err = 0;
	if (u->addr)
		goto out;

	err = -ENOMEM;
	addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
	if (!addr)
		goto out;

	addr->name->sun_family = AF_UNIX;
	atomic_set(&addr->refcnt, 1);

retry:
	addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
	addr->hash = unix_hash_fold(csum_partial((void*)addr->name, addr->len, 0));

	spin_lock(&unix_table_lock);
	ordernum = (ordernum+1)&0xFFFFF;

	if (__unix_find_socket_byname(addr->name, addr->len, sock->type,
				      addr->hash)) {
		spin_unlock(&unix_table_lock);
		/* Sanity yield. It is an unusual case, but yet... */
		if (!(ordernum&0xFF))
			yield();
		goto retry;
	}
	addr->hash ^= sk->sk_type;

	__unix_remove_socket(sk);
	u->addr = addr;
	__unix_insert_socket(&unix_socket_table[addr->hash], sk);
	spin_unlock(&unix_table_lock);
	err = 0;

out:	mutex_unlock(&u->readlock);
	return err;
}

static struct sock *unix_find_other(struct sockaddr_un *sunname, int len,
				    int type, unsigned hash, int *error)
{
	struct sock *u;
	struct nameidata nd;