Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/hvc_iucv.c            |   6
-rw-r--r--  drivers/char/ipmi/ipmi_si_intf.c   |  71
-rw-r--r--  drivers/char/mem.c                 | 195
-rw-r--r--  drivers/char/mmtimer.c             |   2
-rw-r--r--  drivers/char/n_tty.c               |  17
-rw-r--r--  drivers/char/tty_audit.c           |   1
6 files changed, 190 insertions(+), 102 deletions(-)
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 21681a81cc35..37b0542a4eeb 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -139,6 +139,8 @@ struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
  *
  * This function allocates a new struct iucv_tty_buffer element and, optionally,
  * allocates an internal data buffer with the specified size @size.
+ * The internal data buffer is always allocated with GFP_DMA which is
+ * required for receiving and sending data with IUCV.
  * Note: The total message size arises from the internal buffer size and the
  * members of the iucv_tty_msg structure.
  * The function returns NULL if memory allocation has failed.
@@ -154,7 +156,7 @@ static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
 
         if (size > 0) {
                 bufp->msg.length = MSG_SIZE(size);
-                bufp->mbuf = kmalloc(bufp->msg.length, flags);
+                bufp->mbuf = kmalloc(bufp->msg.length, flags | GFP_DMA);
                 if (!bufp->mbuf) {
                         mempool_free(bufp, hvc_iucv_mempool);
                         return NULL;
@@ -237,7 +239,7 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
         if (!rb->mbuf) { /* message not yet received ... */
                 /* allocate mem to store msg data; if no memory is available
                  * then leave the buffer on the list and re-try later */
-                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
+                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA);
                 if (!rb->mbuf)
                         return -ENOMEM;
 
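The GFP_DMA requirement noted in the new comment applies to both kmalloc() calls changed above. A minimal sketch of the resulting allocation pattern follows; the helper name and "len" are illustrative only, not part of the patch.

/* Sketch: allocate an IUCV data buffer by OR-ing GFP_DMA into whatever
 * base flags the caller passes, as alloc_tty_buffer() and
 * hvc_iucv_write() now do.  On s390, GFP_DMA keeps the buffer in
 * ZONE_DMA (below 2 GB), which is the region IUCV can address. */
#include <linux/slab.h>

static void *alloc_iucv_data(size_t len, gfp_t base_flags)
{
        return kmalloc(len, base_flags | GFP_DMA);
}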
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 176f1751237f..4462b113ba3f 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -295,6 +295,9 @@ struct smi_info {
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 
+static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
+static int num_max_busy_us;
+
 static int unload_when_empty = 1;
 
 static int try_smi_init(struct smi_info *smi);
@@ -925,23 +928,77 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
         }
 }
 
+/*
+ * Use -1 in the nsec value of the busy waiting timespec to tell that
+ * we are spinning in kipmid looking for something and not delaying
+ * between checks
+ */
+static inline void ipmi_si_set_not_busy(struct timespec *ts)
+{
+        ts->tv_nsec = -1;
+}
+static inline int ipmi_si_is_busy(struct timespec *ts)
+{
+        return ts->tv_nsec != -1;
+}
+
+static int ipmi_thread_busy_wait(enum si_sm_result smi_result,
+                                 const struct smi_info *smi_info,
+                                 struct timespec *busy_until)
+{
+        unsigned int max_busy_us = 0;
+
+        if (smi_info->intf_num < num_max_busy_us)
+                max_busy_us = kipmid_max_busy_us[smi_info->intf_num];
+        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
+                ipmi_si_set_not_busy(busy_until);
+        else if (!ipmi_si_is_busy(busy_until)) {
+                getnstimeofday(busy_until);
+                timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+        } else {
+                struct timespec now;
+                getnstimeofday(&now);
+                if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+                        ipmi_si_set_not_busy(busy_until);
+                        return 0;
+                }
+        }
+        return 1;
+}
+
+
+/*
+ * A busy-waiting loop for speeding up IPMI operation.
+ *
+ * Lousy hardware makes this hard. This is only enabled for systems
+ * that are not BT and do not have interrupts. It starts spinning
+ * when an operation is complete or until max_busy tells it to stop
+ * (if that is enabled). See the paragraph on kipmid_max_busy_us in
+ * Documentation/IPMI.txt for details.
+ */
 static int ipmi_thread(void *data)
 {
         struct smi_info *smi_info = data;
         unsigned long flags;
         enum si_sm_result smi_result;
+        struct timespec busy_until;
 
+        ipmi_si_set_not_busy(&busy_until);
         set_user_nice(current, 19);
         while (!kthread_should_stop()) {
+                int busy_wait;
+
                 spin_lock_irqsave(&(smi_info->si_lock), flags);
                 smi_result = smi_event_handler(smi_info, 0);
                 spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+                busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
+                                                  &busy_until);
                 if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
                         ; /* do nothing */
-                else if (smi_result == SI_SM_CALL_WITH_DELAY)
+                else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
                         schedule();
                 else
-                        schedule_timeout_interruptible(1);
+                        schedule_timeout_interruptible(0);
         }
         return 0;
 }
@@ -1144,7 +1201,7 @@ static int regsizes[SI_MAX_PARMS];
 static unsigned int num_regsizes;
 static int regshifts[SI_MAX_PARMS];
 static unsigned int num_regshifts;
-static int slave_addrs[SI_MAX_PARMS];
+static int slave_addrs[SI_MAX_PARMS]; /* Leaving 0 chooses the default value */
 static unsigned int num_slave_addrs;
 
 #define IPMI_IO_ADDR_SPACE 0
@@ -1212,6 +1269,11 @@ module_param(unload_when_empty, int, 0);
 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
                  " specified or found, default is 1. Setting to 0"
                  " is useful for hot add of devices using hotmod.");
+module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
+MODULE_PARM_DESC(kipmid_max_busy_us,
+                 "Max time (in microseconds) to busy-wait for IPMI data before"
+                 " sleeping. 0 (default) means to wait forever. Set to 100-500"
+                 " if kipmid is using up a lot of CPU time.");
 
 
 static void std_irq_cleanup(struct smi_info *info)
@@ -1607,7 +1669,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp)
                 regsize = 1;
                 regshift = 0;
                 irq = 0;
-                ipmb = 0x20;
+                ipmb = 0; /* Choose the default if not specified */
 
                 next = strchr(curr, ':');
                 if (next) {
@@ -1799,6 +1861,7 @@ static __devinit void hardcode_find_bmc(void)
                 info->irq = irqs[i];
                 if (info->irq)
                         info->irq_setup = std_irq_setup;
+                info->slave_addr = slave_addrs[i];
 
                 try_smi_init(info);
         }
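For a feel of how the new ipmi_thread_busy_wait() deadline works, the sketch below is a small userspace adaptation; it is illustrative only, with clock_gettime() standing in for getnstimeofday() and a hard-coded budget standing in for the kipmid_max_busy_us module parameter.

/* Userspace sketch of the busy-wait deadline logic added above; the
 * kernel helpers (getnstimeofday, timespec_add_ns, timespec_compare)
 * are replaced with clock_gettime() and open-coded equivalents. */
#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC  1000000000L
#define NSEC_PER_USEC 1000L

static void set_not_busy(struct timespec *ts) { ts->tv_nsec = -1; }
static int is_busy(const struct timespec *ts) { return ts->tv_nsec != -1; }

static void add_ns(struct timespec *ts, long ns)
{
        ts->tv_nsec += ns;
        while (ts->tv_nsec >= NSEC_PER_SEC) {
                ts->tv_nsec -= NSEC_PER_SEC;
                ts->tv_sec++;
        }
}

/* Returns 1 while the caller should keep spinning, 0 once the
 * max_busy_us budget for this request has been used up. */
static int busy_wait(int want_delay, unsigned int max_busy_us,
                     struct timespec *busy_until)
{
        struct timespec now;

        if (max_busy_us == 0 || !want_delay) {
                set_not_busy(busy_until);
        } else if (!is_busy(busy_until)) {
                /* First delay request: arm the deadline. */
                clock_gettime(CLOCK_MONOTONIC, busy_until);
                add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
        } else {
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > busy_until->tv_sec ||
                    (now.tv_sec == busy_until->tv_sec &&
                     now.tv_nsec > busy_until->tv_nsec)) {
                        set_not_busy(busy_until);
                        return 0;
                }
        }
        return 1;
}

int main(void)
{
        struct timespec deadline;
        unsigned long spins = 0;

        set_not_busy(&deadline);
        /* Pretend the state machine keeps asking for a delay. */
        while (busy_wait(1, 500 /* us */, &deadline))
                spins++;
        printf("spun %lu times before giving up the CPU\n", spins);
        return 0;
}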
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 48788db4e280..1f3215ac085b 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -3,7 +3,7 @@
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  Added devfs support.
  *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
  *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
  */
@@ -44,36 +44,6 @@ static inline unsigned long size_inside_page(unsigned long start,
         return min(sz, size);
 }
 
-/*
- * Architectures vary in how they handle caching for addresses
- * outside of main memory.
- *
- */
-static inline int uncached_access(struct file *file, unsigned long addr)
-{
-#if defined(CONFIG_IA64)
-        /*
-         * On ia64, we ignore O_DSYNC because we cannot tolerate memory attribute aliases.
-         */
-        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#elif defined(CONFIG_MIPS)
-        {
-                extern int __uncached_access(struct file *file,
-                                             unsigned long addr);
-
-                return __uncached_access(file, addr);
-        }
-#else
-        /*
-         * Accessing memory above the top the kernel knows about or through a file pointer
-         * that was marked O_DSYNC will be done non-cached.
-         */
-        if (file->f_flags & O_DSYNC)
-                return 1;
-        return addr >= __pa(high_memory);
-#endif
-}
-
 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
 {
@@ -115,15 +85,15 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 }
 #endif
 
-void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
+void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 {
 }
 
 /*
  * This funcion reads the *physical* memory. The f_pos points directly to the
  * memory location.
  */
-static ssize_t read_mem(struct file * file, char __user * buf,
+static ssize_t read_mem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
 {
         unsigned long p = *ppos;
@@ -140,10 +110,10 @@ static ssize_t read_mem(struct file * file, char __user * buf,
                 if (sz > 0) {
                         if (clear_user(buf, sz))
                                 return -EFAULT;
                         buf += sz;
                         p += sz;
                         count -= sz;
                         read += sz;
                 }
         }
 #endif
@@ -157,9 +127,9 @@ static ssize_t read_mem(struct file * file, char __user * buf,
                         return -EPERM;
 
                 /*
-                 * On ia64 if a page has been mapped somewhere as
-                 * uncached, then it must also be accessed uncached
-                 * by the kernel or data corruption may occur
+                 * On ia64 if a page has been mapped somewhere as uncached, then
+                 * it must also be accessed uncached by the kernel or data
+                 * corruption may occur.
                  */
                 ptr = xlate_dev_mem_ptr(p);
                 if (!ptr)
@@ -180,7 +150,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
         return read;
 }
 
-static ssize_t write_mem(struct file * file, const char __user * buf,
+static ssize_t write_mem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
 {
         unsigned long p = *ppos;
@@ -212,9 +182,9 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
                         return -EPERM;
 
                 /*
-                 * On ia64 if a page has been mapped somewhere as
-                 * uncached, then it must also be accessed uncached
-                 * by the kernel or data corruption may occur
+                 * On ia64 if a page has been mapped somewhere as uncached, then
+                 * it must also be accessed uncached by the kernel or data
+                 * corruption may occur.
                  */
                 ptr = xlate_dev_mem_ptr(p);
                 if (!ptr) {
@@ -242,13 +212,46 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
         return written;
 }
 
-int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
+int __weak phys_mem_access_prot_allowed(struct file *file,
         unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
 {
         return 1;
 }
 
 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+
+/*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+ *
+ */
+static int uncached_access(struct file *file, unsigned long addr)
+{
+#if defined(CONFIG_IA64)
+        /*
+         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
+         * attribute aliases.
+         */
+        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
+#elif defined(CONFIG_MIPS)
+        {
+                extern int __uncached_access(struct file *file,
+                                             unsigned long addr);
+
+                return __uncached_access(file, addr);
+        }
+#else
+        /*
+         * Accessing memory above the top the kernel knows about or through a
+         * file pointer
+         * that was marked O_DSYNC will be done non-cached.
+         */
+        if (file->f_flags & O_DSYNC)
+                return 1;
+        return addr >= __pa(high_memory);
+#endif
+}
+
 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                      unsigned long size, pgprot_t vma_prot)
 {
@@ -294,7 +297,7 @@ static const struct vm_operations_struct mmap_mem_ops = {
 #endif
 };
 
-static int mmap_mem(struct file * file, struct vm_area_struct * vma)
+static int mmap_mem(struct file *file, struct vm_area_struct *vma)
 {
         size_t size = vma->vm_end - vma->vm_start;
 
@@ -329,7 +332,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
 }
 
 #ifdef CONFIG_DEVKMEM
-static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
 {
         unsigned long pfn;
 
@@ -337,9 +340,9 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
         pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
 
         /*
-         * RED-PEN: on some architectures there is more mapped memory
-         * than available in mem_map which pfn_valid checks
-         * for. Perhaps should add a new macro here.
+         * RED-PEN: on some architectures there is more mapped memory than
+         * available in mem_map which pfn_valid checks for. Perhaps should add a
+         * new macro here.
          *
          * RED-PEN: vmalloc is not supported right now.
          */
@@ -389,7 +392,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
 /*
  * This function reads the *virtual* memory as seen by the kernel.
  */
 static ssize_t read_kmem(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
 {
         unsigned long p = *ppos;
@@ -400,8 +403,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
         read = 0;
         if (p < (unsigned long) high_memory) {
                 low_count = count;
-                if (count > (unsigned long) high_memory - p)
-                        low_count = (unsigned long) high_memory - p;
+                if (count > (unsigned long)high_memory - p)
+                        low_count = (unsigned long)high_memory - p;
 
 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                 /* we don't have page 0 mapped on sparc and m68k.. */
@@ -465,9 +468,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
 }
 
 
-static inline ssize_t
-do_write_kmem(unsigned long p, const char __user *buf,
-              size_t count, loff_t *ppos)
+static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
+                             size_t count, loff_t *ppos)
 {
         ssize_t written, sz;
         unsigned long copied;
@@ -491,9 +493,9 @@ do_write_kmem(unsigned long p, const char __user *buf,
                 sz = size_inside_page(p, count);
 
                 /*
-                 * On ia64 if a page has been mapped somewhere as
-                 * uncached, then it must also be accessed uncached
-                 * by the kernel or data corruption may occur
+                 * On ia64 if a page has been mapped somewhere as uncached, then
+                 * it must also be accessed uncached by the kernel or data
+                 * corruption may occur.
                  */
                 ptr = xlate_dev_kmem_ptr((char *)p);
 
@@ -514,11 +516,10 @@ do_write_kmem(unsigned long p, const char __user *buf,
         return written;
 }
 
-
 /*
  * This function writes to the *virtual* memory as seen by the kernel.
  */
-static ssize_t write_kmem(struct file * file, const char __user * buf,
+static ssize_t write_kmem(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
 {
         unsigned long p = *ppos;
@@ -570,17 +571,17 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
 #endif
 
 #ifdef CONFIG_DEVPORT
-static ssize_t read_port(struct file * file, char __user * buf,
+static ssize_t read_port(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
 {
         unsigned long i = *ppos;
         char __user *tmp = buf;
 
         if (!access_ok(VERIFY_WRITE, buf, count))
                 return -EFAULT;
         while (count-- > 0 && i < 65536) {
-                if (__put_user(inb(i),tmp) < 0)
+                if (__put_user(inb(i), tmp) < 0)
                         return -EFAULT;
                 i++;
                 tmp++;
         }
@@ -588,22 +589,22 @@ static ssize_t read_port(struct file * file, char __user * buf,
         return tmp-buf;
 }
 
-static ssize_t write_port(struct file * file, const char __user * buf,
+static ssize_t write_port(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
 {
         unsigned long i = *ppos;
         const char __user * tmp = buf;
 
-        if (!access_ok(VERIFY_READ,buf,count))
+        if (!access_ok(VERIFY_READ, buf, count))
                 return -EFAULT;
         while (count-- > 0 && i < 65536) {
                 char c;
                 if (__get_user(c, tmp)) {
                         if (tmp > buf)
                                 break;
                         return -EFAULT;
                 }
-                outb(c,i);
+                outb(c, i);
                 i++;
                 tmp++;
         }
@@ -612,13 +613,13 @@ static ssize_t write_port(struct file * file, const char __user * buf,
 }
 #endif
 
-static ssize_t read_null(struct file * file, char __user * buf,
+static ssize_t read_null(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
 {
         return 0;
 }
 
-static ssize_t write_null(struct file * file, const char __user * buf,
+static ssize_t write_null(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
 {
         return count;
@@ -630,13 +631,13 @@ static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
         return sd->len;
 }
 
-static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
+static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                  loff_t *ppos, size_t len, unsigned int flags)
 {
         return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
 }
 
-static ssize_t read_zero(struct file * file, char __user * buf,
+static ssize_t read_zero(struct file *file, char __user *buf,
                          size_t count, loff_t *ppos)
 {
         size_t written;
@@ -667,7 +668,7 @@ static ssize_t read_zero(struct file * file, char __user * buf,
         return written ? written : -EFAULT;
 }
 
-static int mmap_zero(struct file * file, struct vm_area_struct * vma)
+static int mmap_zero(struct file *file, struct vm_area_struct *vma)
 {
 #ifndef CONFIG_MMU
         return -ENOSYS;
@@ -677,7 +678,7 @@ static int mmap_zero(struct file * file, struct vm_area_struct * vma)
         return 0;
 }
 
-static ssize_t write_full(struct file * file, const char __user * buf,
+static ssize_t write_full(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
 {
         return -ENOSPC;
@@ -688,8 +689,7 @@ static ssize_t write_full(struct file * file, const char __user * buf,
  * can fopen() both devices with "a" now. This was previously impossible.
  * -- SRB.
  */
-
-static loff_t null_lseek(struct file * file, loff_t offset, int orig)
+static loff_t null_lseek(struct file *file, loff_t offset, int orig)
 {
         return file->f_pos = 0;
 }
@@ -702,24 +702,31 @@ static loff_t null_lseek(struct file * file, loff_t offset, int orig)
  * also note that seeking relative to the "end of file" isn't supported:
  * it has no meaning, so it returns -EINVAL.
  */
-static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
+static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
 {
         loff_t ret;
 
         mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
         switch (orig) {
-        case 0:
-                file->f_pos = offset;
-                ret = file->f_pos;
-                force_successful_syscall_return();
-                break;
-        case 1:
-                file->f_pos += offset;
-                ret = file->f_pos;
-                force_successful_syscall_return();
-                break;
-        default:
-                ret = -EINVAL;
+        case SEEK_CUR:
+                offset += file->f_pos;
+                if ((unsigned long long)offset <
+                    (unsigned long long)file->f_pos) {
+                        ret = -EOVERFLOW;
+                        break;
+                }
+        case SEEK_SET:
+                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
+                if ((unsigned long long)offset >= ~0xFFFULL) {
+                        ret = -EOVERFLOW;
+                        break;
+                }
+                file->f_pos = offset;
+                ret = file->f_pos;
+                force_successful_syscall_return();
+                break;
+        default:
+                ret = -EINVAL;
         }
         mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
         return ret;
@@ -803,7 +810,7 @@ static const struct file_operations oldmem_fops = {
 };
 #endif
 
-static ssize_t kmsg_write(struct file * file, const char __user * buf,
+static ssize_t kmsg_write(struct file *file, const char __user *buf,
                           size_t count, loff_t *ppos)
 {
         char *tmp;
@@ -825,7 +832,7 @@ static ssize_t kmsg_write(struct file * file, const char __user * buf,
 }
 
 static const struct file_operations kmsg_fops = {
         .write = kmsg_write,
 };
 
 static const struct memdev {
@@ -876,7 +883,7 @@ static int memory_open(struct inode *inode, struct file *filp)
 }
 
 static const struct file_operations memory_fops = {
         .open = memory_open,
 };
 
 static char *mem_devnode(struct device *dev, mode_t *mode)
@@ -897,7 +904,7 @@ static int __init chr_dev_init(void)
         if (err)
                 return err;
 
-        if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
+        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
 
         mem_class = class_create(THIS_MODULE, "mem");
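The new memory_lseek() error paths are easiest to see with concrete numbers. The sketch below is a standalone illustration of the two overflow checks; the helper names are made up, and only the comparisons mirror the patch.

/* Standalone illustration of the lseek checks added in memory_lseek(). */
#include <stdio.h>

/* SEEK_CUR: after offset += f_pos, an unsigned compare against the old
 * position detects 64-bit wraparound. */
static int seek_cur_overflows(unsigned long long f_pos, long long offset)
{
        unsigned long long new_pos = f_pos + (unsigned long long)offset;

        return new_pos < f_pos;
}

/* SEEK_SET: offsets at or above ~0xFFFULL would come back to userspace
 * in the -4095..-1 range that libc reserves for errno values (the
 * "f_pos=-9 vs -EBADF=-9" confusion in the patch comment). */
static int seek_set_rejected(unsigned long long offset)
{
        return offset >= ~0xFFFULL;
}

int main(void)
{
        printf("%d\n", seek_cur_overflows(0xfffffffffffff000ULL, 0x2000)); /* 1: wraps   */
        printf("%d\n", seek_cur_overflows(0x1000, 0x2000));                /* 0: no wrap */
        printf("%d\n", seek_set_rejected(~0ULL));                          /* 1: rejected */
        printf("%d\n", seek_set_rejected(0x7fffffffffffffffULL));          /* 0: allowed  */
        return 0;
}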
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c
index 918711aa56f3..04fd0d843b3b 100644
--- a/drivers/char/mmtimer.c
+++ b/drivers/char/mmtimer.c
@@ -546,7 +546,7 @@ static void mmtimer_tasklet(unsigned long data)
 {
         int nodeid = data;
         struct mmtimer_node *mn = &timers[nodeid];
-        struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list);
+        struct mmtimer *x;
         struct k_itimer *t;
         unsigned long flags;
 
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 2e50f4dfc79c..bdae8327143c 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -48,6 +48,7 @@
 #include <linux/audit.h>
 #include <linux/file.h>
 #include <linux/uaccess.h>
+#include <linux/module.h>
 
 #include <asm/system.h>
 
@@ -2091,3 +2092,19 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
         .receive_buf = n_tty_receive_buf,
         .write_wakeup = n_tty_write_wakeup
 };
+
+/**
+ * n_tty_inherit_ops - inherit N_TTY methods
+ * @ops: struct tty_ldisc_ops where to save N_TTY methods
+ *
+ * Used by a generic struct tty_ldisc_ops to easily inherit N_TTY
+ * methods.
+ */
+
+void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
+{
+        *ops = tty_ldisc_N_TTY;
+        ops->owner = NULL;
+        ops->refcount = ops->flags = 0;
+}
+EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
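n_tty_inherit_ops() is exported so that other line disciplines can start from the N_TTY methods and override only what they need. A hypothetical module using it might look like the sketch below; the ldisc number N_EXAMPLE and the name are invented for illustration, and tty_register_ldisc() is used with its signature from this kernel era.

/* Hypothetical line-discipline module built on top of N_TTY; only
 * n_tty_inherit_ops() itself comes from the patch above. */
#include <linux/module.h>
#include <linux/tty.h>

#define N_EXAMPLE 25                    /* assumed free ldisc slot */

static struct tty_ldisc_ops example_ldisc_ops;

static int __init example_ldisc_init(void)
{
        /* Start from the stock N_TTY methods, then override selectively. */
        n_tty_inherit_ops(&example_ldisc_ops);
        example_ldisc_ops.magic = TTY_LDISC_MAGIC;
        example_ldisc_ops.name  = "example";
        example_ldisc_ops.owner = THIS_MODULE;

        return tty_register_ldisc(N_EXAMPLE, &example_ldisc_ops);
}

static void __exit example_ldisc_exit(void)
{
        tty_unregister_ldisc(N_EXAMPLE);
}

module_init(example_ldisc_init);
module_exit(example_ldisc_exit);
MODULE_LICENSE("GPL");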
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c
index ac16fbec72d0..283a15bc84e3 100644
--- a/drivers/char/tty_audit.c
+++ b/drivers/char/tty_audit.c
@@ -148,7 +148,6 @@ void tty_audit_fork(struct signal_struct *sig)
         spin_lock_irq(&current->sighand->siglock);
         sig->audit_tty = current->signal->audit_tty;
         spin_unlock_irq(&current->sighand->siglock);
-        sig->tty_audit_buf = NULL;
 }
 
 /**