-rw-r--r--  arch/mips/kernel/irixsig.c | 4
-rw-r--r--  arch/um/kernel/smp.c | 1
-rw-r--r--  drivers/char/tty_io.c | 20
-rw-r--r--  drivers/ieee1394/dv1394.c | 41
-rw-r--r--  drivers/ieee1394/ieee1394_core.c | 16
-rw-r--r--  drivers/ieee1394/ieee1394_core.h | 3
-rw-r--r--  drivers/ieee1394/ohci1394.c | 49
-rw-r--r--  drivers/ieee1394/raw1394.c | 6
-rw-r--r--  drivers/ieee1394/sbp2.c | 137
-rw-r--r--  drivers/ieee1394/video1394.c | 16
-rw-r--r--  fs/direct-io.c | 7
-rw-r--r--  fs/exec.c | 30
-rw-r--r--  fs/xfs/linux-2.6/mrlock.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 67
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 22
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_lrw.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_vfs.h | 2
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 2
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 14
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 2
-rw-r--r--  fs/xfs/quota/xfs_trans_dquot.c | 2
-rw-r--r--  fs/xfs/xfs_acl.c | 2
-rw-r--r--  fs/xfs/xfs_ag.h | 2
-rw-r--r--  fs/xfs/xfs_alloc.c | 6
-rw-r--r--  fs/xfs/xfs_alloc.h | 2
-rw-r--r--  fs/xfs/xfs_attr.c | 6
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 4
-rw-r--r--  fs/xfs/xfs_behavior.c | 4
-rw-r--r--  fs/xfs/xfs_behavior.h | 4
-rw-r--r--  fs/xfs/xfs_bmap.c | 107
-rw-r--r--  fs/xfs/xfs_bmap.h | 8
-rw-r--r--  fs/xfs/xfs_buf_item.c | 4
-rw-r--r--  fs/xfs/xfs_cap.h | 2
-rw-r--r--  fs/xfs/xfs_da_btree.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 2
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 2
-rw-r--r--  fs/xfs/xfs_dir_leaf.c | 2
-rw-r--r--  fs/xfs/xfs_fsops.c | 2
-rw-r--r--  fs/xfs/xfs_ialloc.c | 110
-rw-r--r--  fs/xfs/xfs_iget.c | 2
-rw-r--r--  fs/xfs/xfs_inode.c | 14
-rw-r--r--  fs/xfs/xfs_inode_item.c | 2
-rw-r--r--  fs/xfs/xfs_itable.c | 4
-rw-r--r--  fs/xfs/xfs_itable.h | 2
-rw-r--r--  fs/xfs/xfs_log.c | 22
-rw-r--r--  fs/xfs/xfs_log.h | 2
-rw-r--r--  fs/xfs/xfs_log_recover.c | 4
-rw-r--r--  fs/xfs/xfs_mount.c | 4
-rw-r--r--  fs/xfs/xfs_mount.h | 2
-rw-r--r--  fs/xfs/xfs_quota.h | 4
-rw-r--r--  fs/xfs/xfs_trans.c | 6
-rw-r--r--  fs/xfs/xfs_trans.h | 2
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_vfsops.c | 10
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 12
-rw-r--r--  include/linux/init_task.h | 2
-rw-r--r--  include/linux/pid.h | 2
-rw-r--r--  include/linux/sched.h | 51
-rw-r--r--  include/linux/signal.h | 2
-rw-r--r--  include/linux/slab.h | 1
-rw-r--r--  kernel/exit.c | 129
-rw-r--r--  kernel/fork.c | 121
-rw-r--r--  kernel/kmod.c | 2
-rw-r--r--  kernel/pid.c | 40
-rw-r--r--  kernel/ptrace.c | 8
-rw-r--r--  kernel/signal.c | 344
-rw-r--r--  kernel/sys.c | 73
71 files changed, 599 insertions(+), 994 deletions(-)
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 08273a2a501d..8150f071f80a 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -603,7 +603,7 @@ repeat:
 		/* move to end of parent's list to avoid starvation */
 		write_lock_irq(&tasklist_lock);
 		remove_parent(p);
-		add_parent(p, p->parent);
+		add_parent(p);
 		write_unlock_irq(&tasklist_lock);
 		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
 		if (retval)
@@ -643,7 +643,7 @@ repeat:
 			write_lock_irq(&tasklist_lock);
 			remove_parent(p);
 			p->parent = p->real_parent;
-			add_parent(p, p->parent);
+			add_parent(p);
 			do_notify_parent(p, SIGCHLD);
 			write_unlock_irq(&tasklist_lock);
 		} else
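Note: this hunk tracks a helper change made elsewhere in the series; add_parent() drops its second argument and, like remove_parent(), derives the list linkage from p->parent itself. A sketch of the expected new definitions, inferred from the include/linux/sched.h entry in the diffstat (that hunk is not part of this excerpt, so verify against the tree):

    #define remove_parent(p)  list_del_init(&(p)->sibling)
    #define add_parent(p)     list_add_tail(&(p)->sibling, &(p)->parent->children)

This is why the second hunk assigns p->parent = p->real_parent before calling add_parent(p).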
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c
index c8d8d0ac1a7f..511116aebaf7 100644
--- a/arch/um/kernel/smp.c
+++ b/arch/um/kernel/smp.c
@@ -143,7 +143,6 @@ void smp_prepare_cpus(unsigned int maxcpus)
 		idle = idle_thread(cpu);
 
 		init_idle(idle, cpu);
-		unhash_process(idle);
 
 		waittime = 200000000;
 		while (waittime-- && !cpu_isset(cpu, cpu_callin_map))
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 811dadb9ce3e..0bfd1b63662e 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1094,8 +1094,8 @@ static void do_tty_hangup(void *data)
 		p->signal->tty = NULL;
 		if (!p->signal->leader)
 			continue;
-		send_group_sig_info(SIGHUP, SEND_SIG_PRIV, p);
-		send_group_sig_info(SIGCONT, SEND_SIG_PRIV, p);
+		group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
+		group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
 		if (tty->pgrp > 0)
 			p->signal->tty_old_pgrp = tty->pgrp;
 	} while_each_task_pid(tty->session, PIDTYPE_SID, p);
@@ -2672,7 +2672,7 @@ static void __do_SAK(void *arg)
 	tty_hangup(tty);
 #else
 	struct tty_struct *tty = arg;
-	struct task_struct *p;
+	struct task_struct *g, *p;
 	int session;
 	int i;
 	struct file *filp;
@@ -2693,8 +2693,18 @@ static void __do_SAK(void *arg)
 		tty->driver->flush_buffer(tty);
 
 	read_lock(&tasklist_lock);
+	/* Kill the entire session */
 	do_each_task_pid(session, PIDTYPE_SID, p) {
-		if (p->signal->tty == tty || session > 0) {
+		printk(KERN_NOTICE "SAK: killed process %d"
+			" (%s): p->signal->session==tty->session\n",
+			p->pid, p->comm);
+		send_sig(SIGKILL, p, 1);
+	} while_each_task_pid(session, PIDTYPE_SID, p);
+	/* Now kill any processes that happen to have the
+	 * tty open.
+	 */
+	do_each_thread(g, p) {
+		if (p->signal->tty == tty) {
 			printk(KERN_NOTICE "SAK: killed process %d"
 				" (%s): p->signal->session==tty->session\n",
 				p->pid, p->comm);
@@ -2721,7 +2731,7 @@ static void __do_SAK(void *arg)
 			rcu_read_unlock();
 		}
 		task_unlock(p);
-	} while_each_task_pid(session, PIDTYPE_SID, p);
+	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 #endif
 }
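Note: __do_SAK() now kills in two passes: everything in the tty's session first, then any task anywhere whose signal->tty points at this tty (the old single-pass version only looked at the session). The new cursor g exists because do_each_thread() walks every thread of every thread group; paraphrasing the 2.6-era definitions from <linux/sched.h> (quoted from memory, treat as a sketch):

    #define do_each_thread(g, t) \
        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

    #define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)

g marks the thread group where the walk currently is; t cycles through that group's threads until it wraps back around to g.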
diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
index efeaa944bd0a..85c2d4ca0def 100644
--- a/drivers/ieee1394/dv1394.c
+++ b/drivers/ieee1394/dv1394.c
@@ -73,7 +73,7 @@
   - fix all XXX showstoppers
   - disable IR/IT DMA interrupts on shutdown
   - flush pci writes to the card by issuing a read
-  - devfs and character device dispatching (* needs testing with Linux 2.2.x)
+  - character device dispatching
   - switch over to the new kernel DMA API (pci_map_*()) (* needs testing on platforms with IOMMU!)
   - keep all video_cards in a list (for open() via chardev), set file->private_data = video
   - dv1394_poll should indicate POLLIN when receiving buffers are available
@@ -1096,7 +1096,6 @@ static int do_dv1394_init_default(struct video_card *video)
 
 	init.api_version = DV1394_API_VERSION;
 	init.n_frames    = DV1394_MAX_FRAMES / 4;
-	/* the following are now set via devfs */
 	init.channel     = video->channel;
 	init.format      = video->pal_or_ntsc;
 	init.cip_n       = video->cip_n;
@@ -1791,8 +1790,6 @@ static int dv1394_open(struct inode *inode, struct file *file)
 {
 	struct video_card *video = NULL;
 
-	/* if the device was opened through devfs, then file->private_data
-	   has already been set to video by devfs */
 	if (file->private_data) {
 		video = (struct video_card*) file->private_data;
 
@@ -2211,7 +2208,7 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
 	video = kzalloc(sizeof(*video), GFP_KERNEL);
 	if (!video) {
 		printk(KERN_ERR "dv1394: cannot allocate video_card\n");
-		goto err;
+		return -1;
 	}
 
 	video->ohci = ohci;
@@ -2266,37 +2263,14 @@ static int dv1394_init(struct ti_ohci *ohci, enum pal_or_ntsc format, enum modes
 	list_add_tail(&video->list, &dv1394_cards);
 	spin_unlock_irqrestore(&dv1394_cards_lock, flags);
 
-	if (devfs_mk_cdev(MKDEV(IEEE1394_MAJOR,
-				IEEE1394_MINOR_BLOCK_DV1394*16 + video->id),
-			S_IFCHR|S_IRUGO|S_IWUGO,
-			"ieee1394/dv/host%d/%s/%s",
-			(video->id>>2),
-			(video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
-			(video->mode == MODE_RECEIVE ? "in" : "out")) < 0)
-		goto err_free;
-
 	debug_printk("dv1394: dv1394_init() OK on ID %d\n", video->id);
-
 	return 0;
-
- err_free:
-	kfree(video);
- err:
-	return -1;
 }
 
 static void dv1394_un_init(struct video_card *video)
 {
-	char buf[32];
-
 	/* obviously nobody has the driver open at this point */
 	do_dv1394_shutdown(video, 1);
-	snprintf(buf, sizeof(buf), "dv/host%d/%s/%s", (video->id >> 2),
-		 (video->pal_or_ntsc == DV1394_NTSC ? "NTSC" : "PAL"),
-		 (video->mode == MODE_RECEIVE ? "in" : "out")
-		);
-
-	devfs_remove("ieee1394/%s", buf);
 	kfree(video);
 }
 
@@ -2333,9 +2307,6 @@ static void dv1394_remove_host (struct hpsb_host *host)
 
 	class_device_destroy(hpsb_protocol_class,
 		MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)));
-	devfs_remove("ieee1394/dv/host%d/NTSC", id);
-	devfs_remove("ieee1394/dv/host%d/PAL", id);
-	devfs_remove("ieee1394/dv/host%d", id);
 }
 
 static void dv1394_add_host (struct hpsb_host *host)
@@ -2352,9 +2323,6 @@ static void dv1394_add_host (struct hpsb_host *host)
 	class_device_create(hpsb_protocol_class, NULL, MKDEV(
 		IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16 + (id<<2)),
 		NULL, "dv1394-%d", id);
-	devfs_mk_dir("ieee1394/dv/host%d", id);
-	devfs_mk_dir("ieee1394/dv/host%d/NTSC", id);
-	devfs_mk_dir("ieee1394/dv/host%d/PAL", id);
 
 	dv1394_init(ohci, DV1394_NTSC, MODE_RECEIVE);
 	dv1394_init(ohci, DV1394_NTSC, MODE_TRANSMIT);
@@ -2611,10 +2579,8 @@ MODULE_LICENSE("GPL");
 static void __exit dv1394_exit_module(void)
 {
 	hpsb_unregister_protocol(&dv1394_driver);
-
 	hpsb_unregister_highlevel(&dv1394_highlevel);
 	cdev_del(&dv1394_cdev);
-	devfs_remove("ieee1394/dv");
 }
 
 static int __init dv1394_init_module(void)
@@ -2630,15 +2596,12 @@ static int __init dv1394_init_module(void)
 		return ret;
 	}
 
-	devfs_mk_dir("ieee1394/dv");
-
 	hpsb_register_highlevel(&dv1394_highlevel);
 
 	ret = hpsb_register_protocol(&dv1394_driver);
 	if (ret) {
 		printk(KERN_ERR "dv1394: failed to register protocol\n");
 		hpsb_unregister_highlevel(&dv1394_highlevel);
-		devfs_remove("ieee1394/dv");
 		cdev_del(&dv1394_cdev);
 		return ret;
 	}
diff --git a/drivers/ieee1394/ieee1394_core.c b/drivers/ieee1394/ieee1394_core.c
index 25ef5a86f5f0..be6854e25ad4 100644
--- a/drivers/ieee1394/ieee1394_core.c
+++ b/drivers/ieee1394/ieee1394_core.c
@@ -58,7 +58,7 @@ MODULE_PARM_DESC(disable_nodemgr, "Disable nodemgr functionality.");
 
 /* Disable Isochronous Resource Manager functionality */
 int hpsb_disable_irm = 0;
-module_param_named(disable_irm, hpsb_disable_irm, bool, 0);
+module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
 MODULE_PARM_DESC(disable_irm,
 		 "Disable Isochronous Resource Manager functionality.");
 
@@ -1078,17 +1078,10 @@ static int __init ieee1394_init(void)
 		goto exit_release_kernel_thread;
 	}
 
-	/* actually this is a non-fatal error */
-	ret = devfs_mk_dir("ieee1394");
-	if (ret < 0) {
-		HPSB_ERR("unable to make devfs dir for device major %d!\n", IEEE1394_MAJOR);
-		goto release_chrdev;
-	}
-
 	ret = bus_register(&ieee1394_bus_type);
 	if (ret < 0) {
 		HPSB_INFO("bus register failed");
-		goto release_devfs;
+		goto release_chrdev;
 	}
 
 	for (i = 0; fw_bus_attrs[i]; i++) {
@@ -1099,7 +1092,7 @@ static int __init ieee1394_init(void)
 					fw_bus_attrs[i--]);
 		}
 		bus_unregister(&ieee1394_bus_type);
-		goto release_devfs;
+		goto release_chrdev;
 	}
 }
 
@@ -1152,8 +1145,6 @@ release_all_bus:
 	for (i = 0; fw_bus_attrs[i]; i++)
 		bus_remove_file(&ieee1394_bus_type, fw_bus_attrs[i]);
 	bus_unregister(&ieee1394_bus_type);
-release_devfs:
-	devfs_remove("ieee1394");
release_chrdev:
 	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
exit_release_kernel_thread:
@@ -1191,7 +1182,6 @@ static void __exit ieee1394_cleanup(void)
 	hpsb_cleanup_config_roms();
 
 	unregister_chrdev_region(IEEE1394_CORE_DEV, 256);
-	devfs_remove("ieee1394");
 }
 
 module_init(ieee1394_init);
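Note: besides losing its devfs error path, ieee1394_core changes the disable_irm parameter's permission mask from 0 to 0444, which makes the value world-readable under /sys/module/ieee1394/parameters/ (a mask of 0 keeps it out of sysfs entirely). The declaration pattern, for reference:

    /* perm 0444: readable in sysfs, not writable at runtime */
    module_param_named(disable_irm, hpsb_disable_irm, bool, 0444);
    MODULE_PARM_DESC(disable_irm,
                     "Disable Isochronous Resource Manager functionality.");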
diff --git a/drivers/ieee1394/ieee1394_core.h b/drivers/ieee1394/ieee1394_core.h
index b35466023f00..e7b55e895f50 100644
--- a/drivers/ieee1394/ieee1394_core.h
+++ b/drivers/ieee1394/ieee1394_core.h
@@ -3,7 +3,6 @@
 #define _IEEE1394_CORE_H
 
 #include <linux/slab.h>
-#include <linux/devfs_fs_kernel.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
 #include "hosts.h"
@@ -202,14 +201,12 @@ void hpsb_packet_received(struct hpsb_host *host, quadlet_t *data, size_t size,
 #define IEEE1394_MINOR_BLOCK_RAW1394	   0
 #define IEEE1394_MINOR_BLOCK_VIDEO1394	   1
 #define IEEE1394_MINOR_BLOCK_DV1394	   2
-#define IEEE1394_MINOR_BLOCK_AMDTP	   3
 #define IEEE1394_MINOR_BLOCK_EXPERIMENTAL 15
 
 #define IEEE1394_CORE_DEV	  MKDEV(IEEE1394_MAJOR, 0)
 #define IEEE1394_RAW1394_DEV	  MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16)
 #define IEEE1394_VIDEO1394_DEV	  MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_VIDEO1394 * 16)
 #define IEEE1394_DV1394_DEV	  MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_DV1394 * 16)
-#define IEEE1394_AMDTP_DEV	  MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_AMDTP * 16)
 #define IEEE1394_EXPERIMENTAL_DEV MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_EXPERIMENTAL * 16)
 
 /* return the index (within a minor number block) of a file */
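Note: each protocol driver owns a block of 16 minors under the IEEE1394 major, so a device number is built as block * 16 plus a per-card index. Illustrative arithmetic (the id value here is hypothetical):

    int id = 1;  /* hypothetical card index within the block */
    dev_t dev = MKDEV(IEEE1394_MAJOR,
                      IEEE1394_MINOR_BLOCK_DV1394 * 16 + id);

Dropping AMDTP simply leaves block 3 unused; the remaining blocks keep their numbers, so existing device nodes stay valid.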
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
index 314f35540034..19222878aae9 100644
--- a/drivers/ieee1394/ohci1394.c
+++ b/drivers/ieee1394/ohci1394.c
@@ -544,12 +544,19 @@ static void ohci_initialize(struct ti_ohci *ohci)
 	/* Initialize IR Legacy DMA channel mask */
 	ohci->ir_legacy_channels = 0;
 
-	/*
-	 * Accept AT requests from all nodes. This probably
-	 * will have to be controlled from the subsystem
-	 * on a per node basis.
-	 */
-	reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0x80000000);
+	/* Accept AR requests from all nodes */
+	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
+
+	/* Set the address range of the physical response unit.
+	 * Most controllers do not implement it as a writable register though.
+	 * They will keep a hardwired offset of 0x00010000 and show 0x0 as
+	 * register content.
+	 * To actually enable physical responses is the job of our interrupt
+	 * handler which programs the physical request filter. */
+	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
+
+	DBGMSG("physUpperBoundOffset=%08x",
+	       reg_read(ohci, OHCI1394_PhyUpperBound));
 
 	/* Specify AT retries */
 	reg_write(ohci, OHCI1394_ATRetries,
@@ -572,6 +579,7 @@ static void ohci_initialize(struct ti_ohci *ohci)
 		  OHCI1394_reqTxComplete |
 		  OHCI1394_isochRx |
 		  OHCI1394_isochTx |
+		  OHCI1394_postedWriteErr |
 		  OHCI1394_cycleInconsistent);
 
 	/* Enable link */
@@ -2374,7 +2382,10 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
 
 		event &= ~OHCI1394_unrecoverableError;
 	}
-
+	if (event & OHCI1394_postedWriteErr) {
+		PRINT(KERN_ERR, "physical posted write error");
+		/* no recovery strategy yet, had to involve protocol drivers */
+	}
 	if (event & OHCI1394_cycleInconsistent) {
 		/* We subscribe to the cycleInconsistent event only to
 		 * clear the corresponding event bit... otherwise,
@@ -2382,7 +2393,6 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
 		DBGMSG("OHCI1394_cycleInconsistent");
 		event &= ~OHCI1394_cycleInconsistent;
 	}
-
 	if (event & OHCI1394_busReset) {
 		/* The busReset event bit can't be cleared during the
 		 * selfID phase, so we disable busReset interrupts, to
@@ -2426,7 +2436,6 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
 		}
 		event &= ~OHCI1394_busReset;
 	}
-
 	if (event & OHCI1394_reqTxComplete) {
 		struct dma_trm_ctx *d = &ohci->at_req_context;
 		DBGMSG("Got reqTxComplete interrupt "
@@ -2514,26 +2523,20 @@ static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
 			reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
 			spin_unlock_irqrestore(&ohci->event_lock, flags);
 
-			/* Accept Physical requests from all nodes. */
-			reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
-			reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
-
 			/* Turn on phys dma reception.
 			 *
 			 * TODO: Enable some sort of filtering management.
 			 */
 			if (phys_dma) {
-				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
-				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
-				reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
-			} else {
-				reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
-				reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
+				reg_write(ohci, OHCI1394_PhyReqFilterHiSet,
+					  0xffffffff);
+				reg_write(ohci, OHCI1394_PhyReqFilterLoSet,
+					  0xffffffff);
 			}
 
 			DBGMSG("PhyReqFilter=%08x%08x",
-			       reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
-			       reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
+			       reg_read(ohci, OHCI1394_PhyReqFilterHiSet),
+			       reg_read(ohci, OHCI1394_PhyReqFilterLoSet));
 
 			hpsb_selfid_complete(host, phyid, isroot);
 		} else
@@ -3259,8 +3262,8 @@ static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
 	 * fail to report the right length. Anyway, the ohci spec
 	 * clearly says it's 2kb, so this shouldn't be a problem. */
 	ohci_base = pci_resource_start(dev, 0);
-	if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
-		PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
+	if (pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE)
+		PRINT(KERN_WARNING, "PCI resource length of %lx too small!",
 		      pci_resource_len(dev, 0));
 
 	/* Seems PCMCIA handles this internally. Not sure why. Seems
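Note: PhyUpperBound is now programmed once in ohci_initialize() instead of on every bus reset, and the new comment records the catch: many controllers hard-wire the physical upper bound and silently ignore the write. The DBGMSG read-back above doubles as a probe; a sketch of the idea (register and helper names as in this driver; the reads-back-as-zero behavior is taken from the comment, not verified here):

    reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
    if (reg_read(ohci, OHCI1394_PhyUpperBound) == 0)
        DBGMSG("PhyUpperBound hardwired, offset stays at 0x00010000");

Actually enabling physical responses remains the interrupt handler's job: it programs PhyReqFilter{Hi,Lo}Set after the self-ID phase, as the last ohci_irq_handler hunk shows. The AsReqFilter writes on bus reset are gone because the one-time 0x80000000 write in ohci_initialize() (the accept-from-all-nodes setting, per its new comment) already covers asynchronous requests.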
diff --git a/drivers/ieee1394/raw1394.c b/drivers/ieee1394/raw1394.c
index 19f26c5c9479..f7de546f2ed6 100644
--- a/drivers/ieee1394/raw1394.c
+++ b/drivers/ieee1394/raw1394.c
@@ -41,7 +41,6 @@
 #include <linux/cdev.h>
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/compat.h>
 
 #include "csr1212.h"
@@ -2999,9 +2998,6 @@ static int __init init_raw1394(void)
 		goto out_unreg;
 	}
 
-	devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, IEEE1394_MINOR_BLOCK_RAW1394 * 16),
-		      S_IFCHR | S_IRUSR | S_IWUSR, RAW1394_DEVICE_NAME);
-
 	cdev_init(&raw1394_cdev, &raw1394_fops);
 	raw1394_cdev.owner = THIS_MODULE;
 	kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
@@ -3023,7 +3019,6 @@ static int __init init_raw1394(void)
 	goto out;
 
  out_dev:
-	devfs_remove(RAW1394_DEVICE_NAME);
 	class_device_destroy(hpsb_protocol_class,
 			     MKDEV(IEEE1394_MAJOR,
 				   IEEE1394_MINOR_BLOCK_RAW1394 * 16));
@@ -3039,7 +3034,6 @@ static void __exit cleanup_raw1394(void)
 			     MKDEV(IEEE1394_MAJOR,
 				   IEEE1394_MINOR_BLOCK_RAW1394 * 16));
 	cdev_del(&raw1394_cdev);
-	devfs_remove(RAW1394_DEVICE_NAME);
 	hpsb_unregister_highlevel(&raw1394_highlevel);
 	hpsb_unregister_protocol(&raw1394_driver);
 }
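Note: with devfs gone, userspace reaches raw1394 only through the cdev registration plus the hpsb_protocol_class entry that udev turns into /dev/raw1394. Condensed registration skeleton from the context above (the cdev_add() call is assumed to follow in code outside this hunk; error handling elided):

    cdev_init(&raw1394_cdev, &raw1394_fops);
    raw1394_cdev.owner = THIS_MODULE;
    kobject_set_name(&raw1394_cdev.kobj, RAW1394_DEVICE_NAME);
    ret = cdev_add(&raw1394_cdev, IEEE1394_RAW1394_DEV, 1);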
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index eca92eb475a1..2c765ca5aa50 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -214,6 +214,7 @@ static u32 global_outstanding_dmas = 0;
 #endif
 
 #define SBP2_ERR(fmt, args...)		HPSB_ERR("sbp2: "fmt, ## args)
+#define SBP2_DEBUG_ENTER()		SBP2_DEBUG("%s", __FUNCTION__)
 
 /*
  * Globals
@@ -535,7 +536,7 @@ static struct sbp2_command_info *sbp2util_allocate_command_orb(
 		command->Current_SCpnt = Current_SCpnt;
 		list_add_tail(&command->list, &scsi_id->sbp2_command_orb_inuse);
 	} else {
-		SBP2_ERR("sbp2util_allocate_command_orb - No orbs available!");
+		SBP2_ERR("%s: no orbs available", __FUNCTION__);
 	}
 	spin_unlock_irqrestore(&scsi_id->sbp2_command_orb_lock, flags);
 	return command;
@@ -549,7 +550,7 @@ static void sbp2util_free_command_dma(struct sbp2_command_info *command)
 	struct hpsb_host *host;
 
 	if (!scsi_id) {
-		printk(KERN_ERR "%s: scsi_id == NULL\n", __FUNCTION__);
+		SBP2_ERR("%s: scsi_id == NULL", __FUNCTION__);
 		return;
 	}
 
@@ -610,7 +611,7 @@ static int sbp2_probe(struct device *dev)
 	struct unit_directory *ud;
 	struct scsi_id_instance_data *scsi_id;
 
-	SBP2_DEBUG("sbp2_probe");
+	SBP2_DEBUG_ENTER();
 
 	ud = container_of(dev, struct unit_directory, device);
 
@@ -635,7 +636,7 @@ static int sbp2_remove(struct device *dev)
 	struct scsi_id_instance_data *scsi_id;
 	struct scsi_device *sdev;
 
-	SBP2_DEBUG("sbp2_remove");
+	SBP2_DEBUG_ENTER();
 
 	ud = container_of(dev, struct unit_directory, device);
 	scsi_id = ud->device.driver_data;
@@ -667,7 +668,7 @@ static int sbp2_update(struct unit_directory *ud)
 {
 	struct scsi_id_instance_data *scsi_id = ud->device.driver_data;
 
-	SBP2_DEBUG("sbp2_update");
+	SBP2_DEBUG_ENTER();
 
 	if (sbp2_reconnect_device(scsi_id)) {
 
@@ -715,7 +716,7 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
 	struct Scsi_Host *scsi_host = NULL;
 	struct scsi_id_instance_data *scsi_id = NULL;
 
-	SBP2_DEBUG("sbp2_alloc_device");
+	SBP2_DEBUG_ENTER();
 
 	scsi_id = kzalloc(sizeof(*scsi_id), GFP_KERNEL);
 	if (!scsi_id) {
@@ -749,12 +750,22 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud
 
 #ifdef CONFIG_IEEE1394_SBP2_PHYS_DMA
 		/* Handle data movement if physical dma is not
-		 * enabled/supportedon host controller */
-		hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host, &sbp2_physdma_ops,
-					0x0ULL, 0xfffffffcULL);
+		 * enabled or not supported on host controller */
+		if (!hpsb_register_addrspace(&sbp2_highlevel, ud->ne->host,
+					     &sbp2_physdma_ops,
+					     0x0ULL, 0xfffffffcULL)) {
+			SBP2_ERR("failed to register lower 4GB address range");
+			goto failed_alloc;
+		}
 #endif
 	}
 
+	/* Prevent unloading of the 1394 host */
+	if (!try_module_get(hi->host->driver->owner)) {
+		SBP2_ERR("failed to get a reference on 1394 host driver");
+		goto failed_alloc;
+	}
+
 	scsi_id->hi = hi;
 
 	list_add_tail(&scsi_id->scsi_list, &hi->scsi_ids);
@@ -816,7 +827,7 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
 	struct sbp2scsi_host_info *hi = scsi_id->hi;
 	int error;
 
-	SBP2_DEBUG("sbp2_start_device");
+	SBP2_DEBUG_ENTER();
 
 	/* Login FIFO DMA */
 	scsi_id->login_response =
@@ -891,7 +902,6 @@ static int sbp2_start_device(struct scsi_id_instance_data *scsi_id)
 	 * allows someone else to login instead. One second makes sense. */
 	msleep_interruptible(1000);
 	if (signal_pending(current)) {
-		SBP2_WARN("aborting sbp2_start_device due to event");
 		sbp2_remove_device(scsi_id);
 		return -EINTR;
 	}
@@ -944,7 +954,7 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
 {
 	struct sbp2scsi_host_info *hi;
 
-	SBP2_DEBUG("sbp2_remove_device");
+	SBP2_DEBUG_ENTER();
 
 	if (!scsi_id)
 		return;
@@ -1015,6 +1025,9 @@ static void sbp2_remove_device(struct scsi_id_instance_data *scsi_id)
 
 	scsi_id->ud->device.driver_data = NULL;
 
+	if (hi)
+		module_put(hi->host->driver->owner);
+
 	SBP2_DEBUG("SBP-2 device removed, SCSI ID = %d", scsi_id->ud->id);
 
 	kfree(scsi_id);
@@ -1073,23 +1086,20 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
 	int max_logins;
 	int active_logins;
 
-	SBP2_DEBUG("sbp2_query_logins");
+	SBP2_DEBUG_ENTER();
 
 	scsi_id->query_logins_orb->reserved1 = 0x0;
 	scsi_id->query_logins_orb->reserved2 = 0x0;
 
 	scsi_id->query_logins_orb->query_response_lo = scsi_id->query_logins_response_dma;
 	scsi_id->query_logins_orb->query_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
-	SBP2_DEBUG("sbp2_query_logins: query_response_hi/lo initialized");
 
 	scsi_id->query_logins_orb->lun_misc = ORB_SET_FUNCTION(SBP2_QUERY_LOGINS_REQUEST);
 	scsi_id->query_logins_orb->lun_misc |= ORB_SET_NOTIFY(1);
 	scsi_id->query_logins_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
-	SBP2_DEBUG("sbp2_query_logins: lun_misc initialized");
 
 	scsi_id->query_logins_orb->reserved_resp_length =
 		ORB_SET_QUERY_LOGINS_RESP_LENGTH(sizeof(struct sbp2_query_logins_response));
-	SBP2_DEBUG("sbp2_query_logins: reserved_resp_length initialized");
 
 	scsi_id->query_logins_orb->status_fifo_hi =
 		ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
@@ -1098,25 +1108,19 @@ static int sbp2_query_logins(struct scsi_id_instance_data *scsi_id)
 
 	sbp2util_cpu_to_be32_buffer(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb));
 
-	SBP2_DEBUG("sbp2_query_logins: orb byte-swapped");
-
 	sbp2util_packet_dump(scsi_id->query_logins_orb, sizeof(struct sbp2_query_logins_orb),
			     "sbp2 query logins orb", scsi_id->query_logins_orb_dma);
 
 	memset(scsi_id->query_logins_response, 0, sizeof(struct sbp2_query_logins_response));
 	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
 
-	SBP2_DEBUG("sbp2_query_logins: query_logins_response/status FIFO memset");
-
 	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
 	data[1] = scsi_id->query_logins_orb_dma;
 	sbp2util_cpu_to_be32_buffer(data, 8);
 
 	atomic_set(&scsi_id->sbp2_login_complete, 0);
 
-	SBP2_DEBUG("sbp2_query_logins: prepared to write");
 	hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
-	SBP2_DEBUG("sbp2_query_logins: written");
 
 	if (sbp2util_down_timeout(&scsi_id->sbp2_login_complete, 2*HZ)) {
 		SBP2_INFO("Error querying logins to SBP-2 device - timed out");
@@ -1165,10 +1169,10 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
 	struct sbp2scsi_host_info *hi = scsi_id->hi;
 	quadlet_t data[2];
 
-	SBP2_DEBUG("sbp2_login_device");
+	SBP2_DEBUG_ENTER();
 
 	if (!scsi_id->login_orb) {
-		SBP2_DEBUG("sbp2_login_device: login_orb not alloc'd!");
+		SBP2_DEBUG("%s: login_orb not alloc'd!", __FUNCTION__);
 		return -EIO;
 	}
 
@@ -1182,59 +1186,39 @@ static int sbp2_login_device(struct scsi_id_instance_data *scsi_id)
 	/* Set-up login ORB, assume no password */
 	scsi_id->login_orb->password_hi = 0;
 	scsi_id->login_orb->password_lo = 0;
-	SBP2_DEBUG("sbp2_login_device: password_hi/lo initialized");
 
 	scsi_id->login_orb->login_response_lo = scsi_id->login_response_dma;
 	scsi_id->login_orb->login_response_hi = ORB_SET_NODE_ID(hi->host->node_id);
-	SBP2_DEBUG("sbp2_login_device: login_response_hi/lo initialized");
 
 	scsi_id->login_orb->lun_misc = ORB_SET_FUNCTION(SBP2_LOGIN_REQUEST);
 	scsi_id->login_orb->lun_misc |= ORB_SET_RECONNECT(0);	/* One second reconnect time */
 	scsi_id->login_orb->lun_misc |= ORB_SET_EXCLUSIVE(exclusive_login);	/* Exclusive access to device */
 	scsi_id->login_orb->lun_misc |= ORB_SET_NOTIFY(1);	/* Notify us of login complete */
 	scsi_id->login_orb->lun_misc |= ORB_SET_LUN(scsi_id->sbp2_lun);
-	SBP2_DEBUG("sbp2_login_device: lun_misc initialized");
 
 	scsi_id->login_orb->passwd_resp_lengths =
 		ORB_SET_LOGIN_RESP_LENGTH(sizeof(struct sbp2_login_response));
-	SBP2_DEBUG("sbp2_login_device: passwd_resp_lengths initialized");
 
 	scsi_id->login_orb->status_fifo_hi =
 		ORB_SET_STATUS_FIFO_HI(scsi_id->status_fifo_addr, hi->host->node_id);
 	scsi_id->login_orb->status_fifo_lo =
 		ORB_SET_STATUS_FIFO_LO(scsi_id->status_fifo_addr);
 
-	/*
-	 * Byte swap ORB if necessary
-	 */
 	sbp2util_cpu_to_be32_buffer(scsi_id->login_orb, sizeof(struct sbp2_login_orb));
 
-	SBP2_DEBUG("sbp2_login_device: orb byte-swapped");
-
 	sbp2util_packet_dump(scsi_id->login_orb, sizeof(struct sbp2_login_orb),
			     "sbp2 login orb", scsi_id->login_orb_dma);
 
-	/*
-	 * Initialize login response and status fifo
-	 */
 	memset(scsi_id->login_response, 0, sizeof(struct sbp2_login_response));
 	memset(&scsi_id->status_block, 0, sizeof(struct sbp2_status_block));
 
-	SBP2_DEBUG("sbp2_login_device: login_response/status FIFO memset");
-
-	/*
-	 * Ok, let's write to the target's management agent register
-	 */
 	data[0] = ORB_SET_NODE_ID(hi->host->node_id);
 	data[1] = scsi_id->login_orb_dma;
 	sbp2util_cpu_to_be32_buffer(data, 8);
 
 	atomic_set(&scsi_id->sbp2_login_complete, 0);
 
-	SBP2_DEBUG("sbp2_login_device: prepared to write to %08x",
-		   (unsigned int)scsi_id->sbp2_management_agent_addr);
 	hpsb_node_write(scsi_id->ne, scsi_id->sbp2_management_agent_addr, data, 8);
-	SBP2_DEBUG("sbp2_login_device: written");
 
 	/*
	 * Wait for login status (up to 20 seconds)...
@@ -1298,7 +1282,7 @@ static int sbp2_logout_device(struct scsi_id_instance_data *scsi_id)
 	quadlet_t data[2];
 	int error;
 
-	SBP2_DEBUG("sbp2_logout_device");
+	SBP2_DEBUG_ENTER();
 
 	/*
	 * Set-up logout ORB
@@ -1362,7 +1346,7 @@ static int sbp2_reconnect_device(struct scsi_id_instance_data *scsi_id)
 	quadlet_t data[2];
 	int error;
 
-	SBP2_DEBUG("sbp2_reconnect_device");
+	SBP2_DEBUG_ENTER();
 
 	/*
	 * Set-up reconnect ORB
@@ -1453,17 +1437,11 @@ static int sbp2_set_busy_timeout(struct scsi_id_instance_data *scsi_id)
 {
 	quadlet_t data;
 
-	SBP2_DEBUG("sbp2_set_busy_timeout");
+	SBP2_DEBUG_ENTER();
 
-	/*
-	 * Ok, let's write to the target's busy timeout register
-	 */
 	data = cpu_to_be32(SBP2_BUSY_TIMEOUT_VALUE);
-
-	if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4)) {
-		SBP2_ERR("sbp2_set_busy_timeout error");
-	}
-
+	if (hpsb_node_write(scsi_id->ne, SBP2_BUSY_TIMEOUT_ADDRESS, &data, 4))
+		SBP2_ERR("%s error", __FUNCTION__);
 	return 0;
 }
 
@@ -1482,7 +1460,7 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id,
 	    firmware_revision, workarounds;
 	int i;
 
-	SBP2_DEBUG("sbp2_parse_unit_directory");
+	SBP2_DEBUG_ENTER();
 
 	management_agent_addr = 0x0;
 	command_set_spec_id = 0x0;
@@ -1615,7 +1593,7 @@ static int sbp2_max_speed_and_size(struct scsi_id_instance_data *scsi_id)
 {
 	struct sbp2scsi_host_info *hi = scsi_id->hi;
 
-	SBP2_DEBUG("sbp2_max_speed_and_size");
+	SBP2_DEBUG_ENTER();
 
 	/* Initial setting comes from the hosts speed map */
 	scsi_id->speed_code =
@@ -1652,11 +1630,8 @@ static int sbp2_agent_reset(struct scsi_id_instance_data *scsi_id, int wait)
 	u64 addr;
 	int retval;
 
-	SBP2_DEBUG("sbp2_agent_reset");
+	SBP2_DEBUG_ENTER();
 
-	/*
-	 * Ok, let's write to the target's management agent register
-	 */
 	data = ntohl(SBP2_AGENT_RESET_DATA);
 	addr = scsi_id->sbp2_command_block_agent_addr + SBP2_AGENT_RESET_OFFSET;
 
@@ -2004,11 +1979,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
 	unsigned int request_bufflen = SCpnt->request_bufflen;
 	struct sbp2_command_info *command;
 
-	SBP2_DEBUG("sbp2_send_command");
-#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
-	printk("[scsi command]\n   ");
-	scsi_print_command(SCpnt);
-#endif
+	SBP2_DEBUG_ENTER();
 	SBP2_DEBUG("SCSI transfer size = %x", request_bufflen);
 	SBP2_DEBUG("SCSI s/g elements = %x", (unsigned int)SCpnt->use_sg);
 
@@ -2048,7 +2019,7 @@ static int sbp2_send_command(struct scsi_id_instance_data *scsi_id,
  */
 static unsigned int sbp2_status_to_sense_data(unchar *sbp2_status, unchar *sense_data)
 {
-	SBP2_DEBUG("sbp2_status_to_sense_data");
+	SBP2_DEBUG_ENTER();
 
 	/*
	 * Ok, it's pretty ugly...  ;-)
@@ -2082,7 +2053,7 @@ static void sbp2_check_sbp2_response(struct scsi_id_instance_data *scsi_id,
 {
 	u8 *scsi_buf = SCpnt->request_buffer;
 
-	SBP2_DEBUG("sbp2_check_sbp2_response");
+	SBP2_DEBUG_ENTER();
 
 	if (SCpnt->cmnd[0] == INQUIRY && (SCpnt->cmnd[1] & 3) == 0) {
 		/*
@@ -2113,7 +2084,7 @@ static int sbp2_handle_status_write(struct hpsb_host *host, int nodeid, int dest
 	struct sbp2_command_info *command;
 	unsigned long flags;
 
-	SBP2_DEBUG("sbp2_handle_status_write");
+	SBP2_DEBUG_ENTER();
 
 	sbp2util_packet_dump(data, length, "sbp2 status write by device", (u32)addr);
 
@@ -2260,7 +2231,10 @@ static int sbp2scsi_queuecommand(struct scsi_cmnd *SCpnt,
 	struct sbp2scsi_host_info *hi;
 	int result = DID_NO_CONNECT << 16;
 
-	SBP2_DEBUG("sbp2scsi_queuecommand");
+	SBP2_DEBUG_ENTER();
+#if (CONFIG_IEEE1394_SBP2_DEBUG >= 2) || defined(CONFIG_IEEE1394_SBP2_PACKET_DUMP)
+	scsi_print_command(SCpnt);
+#endif
 
 	if (!sbp2util_node_is_available(scsi_id))
 		goto done;
@@ -2338,7 +2312,7 @@ static void sbp2scsi_complete_all_commands(struct scsi_id_instance_data *scsi_id
 	struct sbp2_command_info *command;
 	unsigned long flags;
 
-	SBP2_DEBUG("sbp2scsi_complete_all_commands");
+	SBP2_DEBUG_ENTER();
 
 	spin_lock_irqsave(&scsi_id->sbp2_command_orb_lock, flags);
 	while (!list_empty(&scsi_id->sbp2_command_orb_inuse)) {
@@ -2371,7 +2345,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
 				      u32 scsi_status, struct scsi_cmnd *SCpnt,
 				      void (*done)(struct scsi_cmnd *))
 {
-	SBP2_DEBUG("sbp2scsi_complete_command");
+	SBP2_DEBUG_ENTER();
 
 	/*
	 * Sanity
@@ -2397,7 +2371,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
 	 */
 	switch (scsi_status) {
 	case SBP2_SCSI_STATUS_GOOD:
-		SCpnt->result = DID_OK;
+		SCpnt->result = DID_OK << 16;
 		break;
 
 	case SBP2_SCSI_STATUS_BUSY:
@@ -2407,16 +2381,11 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
 
 	case SBP2_SCSI_STATUS_CHECK_CONDITION:
 		SBP2_DEBUG("SBP2_SCSI_STATUS_CHECK_CONDITION");
-		SCpnt->result = CHECK_CONDITION << 1;
-
-		/*
-		 * Debug stuff
-		 */
+		SCpnt->result = CHECK_CONDITION << 1 | DID_OK << 16;
 #if CONFIG_IEEE1394_SBP2_DEBUG >= 1
 		scsi_print_command(SCpnt);
-		scsi_print_sense("bh", SCpnt);
+		scsi_print_sense(SBP2_DEVICE_NAME, SCpnt);
 #endif
-
 		break;
 
 	case SBP2_SCSI_STATUS_SELECTION_TIMEOUT:
@@ -2441,7 +2410,7 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
 	/*
	 * Take care of any sbp2 response data mucking here (RBC stuff, etc.)
	 */
-	if (SCpnt->result == DID_OK) {
+	if (SCpnt->result == DID_OK << 16) {
 		sbp2_check_sbp2_response(scsi_id, SCpnt);
 	}
 
@@ -2459,6 +2428,8 @@ static void sbp2scsi_complete_command(struct scsi_id_instance_data *scsi_id,
 	 * If a unit attention occurs, return busy status so it gets
	 * retried... it could have happened because of a 1394 bus reset
	 * or hot-plug...
+	 * XXX DID_BUS_BUSY is actually a bad idea because it will defy
+	 * the scsi layer's retry logic.
	 */
 #if 0
 	if ((scsi_status == SBP2_SCSI_STATUS_CHECK_CONDITION) &&
@@ -2624,7 +2595,7 @@ static int sbp2_module_init(void)
 {
 	int ret;
 
-	SBP2_DEBUG("sbp2_module_init");
+	SBP2_DEBUG_ENTER();
 
 	/* Module load debug option to force one command at a time (serializing I/O) */
 	if (serialize_io) {
@@ -2652,7 +2623,7 @@ static int sbp2_module_init(void)
 
 static void __exit sbp2_module_exit(void)
 {
-	SBP2_DEBUG("sbp2_module_exit");
+	SBP2_DEBUG_ENTER();
 
 	hpsb_unregister_protocol(&sbp2_driver);
 
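Note on the DID_OK shifts: the SCSI midlayer's result word packs four byte-wide fields, so host codes such as DID_OK belong in bits 16-23. Per the 2.6-era <scsi/scsi.h> conventions:

    /* result = (driver_byte << 24) | (host_byte << 16) |
     *          (msg_byte    <<  8) |  status_byte
     * Status codes in these headers are stored shifted right by one,
     * hence CHECK_CONDITION << 1 to land in the status byte. */
    SCpnt->result = DID_OK << 16 | CHECK_CONDITION << 1;

DID_OK happens to be 0, so the old unshifted assignment worked by accident; writing and comparing DID_OK << 16 makes the intent explicit and lets the CHECK_CONDITION path carry a host byte and a status byte at once.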
diff --git a/drivers/ieee1394/video1394.c b/drivers/ieee1394/video1394.c
index 216dbbf1dc8e..4e3bd62c458d 100644
--- a/drivers/ieee1394/video1394.c
+++ b/drivers/ieee1394/video1394.c
@@ -42,7 +42,6 @@
 #include <linux/poll.h>
 #include <linux/smp_lock.h>
 #include <linux/delay.h>
-#include <linux/devfs_fs_kernel.h>
 #include <linux/bitops.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
@@ -1322,9 +1321,6 @@ static void video1394_add_host (struct hpsb_host *host)
 	class_device_create(hpsb_protocol_class, NULL, MKDEV(
 		IEEE1394_MAJOR, minor),
 		NULL, "%s-%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
-	devfs_mk_cdev(MKDEV(IEEE1394_MAJOR, minor),
-		      S_IFCHR | S_IRUSR | S_IWUSR,
-		      "%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
 }
 
 
@@ -1332,12 +1328,9 @@ static void video1394_remove_host (struct hpsb_host *host)
 {
 	struct ti_ohci *ohci = hpsb_get_hostinfo(&video1394_highlevel, host);
 
-	if (ohci) {
+	if (ohci)
 		class_device_destroy(hpsb_protocol_class, MKDEV(IEEE1394_MAJOR,
 			IEEE1394_MINOR_BLOCK_VIDEO1394 * 16 + ohci->host->id));
-		devfs_remove("%s/%d", VIDEO1394_DRIVER_NAME, ohci->host->id);
-	}
-
 	return;
 }
 
@@ -1478,12 +1471,8 @@ static long video1394_compat_ioctl(struct file *f, unsigned cmd, unsigned long a
 static void __exit video1394_exit_module (void)
 {
 	hpsb_unregister_protocol(&video1394_driver);
-
 	hpsb_unregister_highlevel(&video1394_highlevel);
-
-	devfs_remove(VIDEO1394_DRIVER_NAME);
 	cdev_del(&video1394_cdev);
-
 	PRINT_G(KERN_INFO, "Removed " VIDEO1394_DRIVER_NAME " module");
 }
 
@@ -1500,15 +1489,12 @@ static int __init video1394_init_module (void)
 		return ret;
 	}
 
-	devfs_mk_dir(VIDEO1394_DRIVER_NAME);
-
 	hpsb_register_highlevel(&video1394_highlevel);
 
 	ret = hpsb_register_protocol(&video1394_driver);
 	if (ret) {
 		PRINT_G(KERN_ERR, "video1394: failed to register protocol");
 		hpsb_unregister_highlevel(&video1394_highlevel);
-		devfs_remove(VIDEO1394_DRIVER_NAME);
 		cdev_del(&video1394_cdev);
 		return ret;
 	}
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 9d1d2aa73e42..910a8ed74b5d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -524,8 +524,6 @@ static int get_more_blocks(struct dio *dio)
 	 */
 	ret = dio->page_errors;
 	if (ret == 0) {
-		map_bh->b_state = 0;
-		map_bh->b_size = 0;
 		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
 		fs_startblk = dio->block_in_file >> dio->blkfactor;
 		dio_count = dio->final_block_in_request - dio->block_in_file;
@@ -534,6 +532,9 @@ static int get_more_blocks(struct dio *dio)
 		if (dio_count & blkmask)
 			fs_count++;
 
+		map_bh->b_state = 0;
+		map_bh->b_size = fs_count << dio->inode->i_blkbits;
+
 		create = dio->rw == WRITE;
 		if (dio->lock_type == DIO_LOCKING) {
 			if (dio->block_in_file < (i_size_read(dio->inode) >>
@@ -542,13 +543,13 @@ static int get_more_blocks(struct dio *dio)
 		} else if (dio->lock_type == DIO_NO_LOCKING) {
 			create = 0;
 		}
+
 		/*
		 * For writes inside i_size we forbid block creations: only
		 * overwrites are permitted. We fall back to buffered writes
		 * at a higher level for inside-i_size block-instantiating
		 * writes.
		 */
-		map_bh->b_size = fs_count << dio->blkbits;
 		ret = (*dio->get_block)(dio->inode, fs_startblk,
					map_bh, create);
 	}
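Note: the b_state/b_size initialization moves up so map_bh is fully described before get_block() runs, and b_size now scales by the filesystem block size (inode->i_blkbits) rather than dio->blkbits, the possibly smaller block size the I/O was issued in. A self-contained worked example of the arithmetic; all values are illustrative, not taken from the patch:

    #include <stdio.h>

    int main(void)
    {
        unsigned blkbits = 9;              /* dio issued in 512-byte blocks */
        unsigned i_blkbits = 12;           /* fs block size is 4096 bytes   */
        unsigned blkfactor = i_blkbits - blkbits;                  /* 3  */
        unsigned long block_in_file = 9, final_block = 40; /* 512B units */

        unsigned long fs_startblk = block_in_file >> blkfactor;    /* 1  */
        unsigned long dio_count = final_block - block_in_file;     /* 31 */
        unsigned long blkmask = (1UL << blkfactor) - 1;            /* 7  */
        unsigned long fs_count = dio_count >> blkfactor;           /* 3  */
        if (dio_count & blkmask)          /* partial fs block at the end */
            fs_count++;                                            /* 4  */

        /* b_size is in bytes, so it scales by the fs block size: */
        printf("fs_startblk=%lu b_size=%lu\n",
               fs_startblk, fs_count << i_blkbits);       /* b_size=16384 */
        return 0;
    }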
diff --git a/fs/exec.c b/fs/exec.c
index c7397c46ad6d..950ebd43cdc3 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -616,6 +616,15 @@ static int de_thread(struct task_struct *tsk)
616 kmem_cache_free(sighand_cachep, newsighand); 616 kmem_cache_free(sighand_cachep, newsighand);
617 return -EAGAIN; 617 return -EAGAIN;
618 } 618 }
619
620 /*
621 * child_reaper ignores SIGKILL, change it now.
622 * Reparenting needs write_lock on tasklist_lock,
623 * so it is safe to do it under read_lock.
624 */
625 if (unlikely(current->group_leader == child_reaper))
626 child_reaper = current;
627
619 zap_other_threads(current); 628 zap_other_threads(current);
620 read_unlock(&tasklist_lock); 629 read_unlock(&tasklist_lock);
621 630
@@ -699,22 +708,30 @@ static int de_thread(struct task_struct *tsk)
699 remove_parent(current); 708 remove_parent(current);
700 remove_parent(leader); 709 remove_parent(leader);
701 710
702 switch_exec_pids(leader, current); 711
712 /* Become a process group leader with the old leader's pid.
713 * Note: The old leader also uses thispid until release_task
714 * is called. Odd but simple and correct.
715 */
716 detach_pid(current, PIDTYPE_PID);
717 current->pid = leader->pid;
718 attach_pid(current, PIDTYPE_PID, current->pid);
719 attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
720 attach_pid(current, PIDTYPE_SID, current->signal->session);
721 list_add_tail(&current->tasks, &init_task.tasks);
703 722
704 current->parent = current->real_parent = leader->real_parent; 723 current->parent = current->real_parent = leader->real_parent;
705 leader->parent = leader->real_parent = child_reaper; 724 leader->parent = leader->real_parent = child_reaper;
706 current->group_leader = current; 725 current->group_leader = current;
707 leader->group_leader = leader; 726 leader->group_leader = leader;
708 727
709 add_parent(current, current->parent); 728 add_parent(current);
710 add_parent(leader, leader->parent); 729 add_parent(leader);
711 if (ptrace) { 730 if (ptrace) {
712 current->ptrace = ptrace; 731 current->ptrace = ptrace;
713 __ptrace_link(current, parent); 732 __ptrace_link(current, parent);
714 } 733 }
715 734
716 list_del(&current->tasks);
717 list_add_tail(&current->tasks, &init_task.tasks);
718 current->exit_signal = SIGCHLD; 735 current->exit_signal = SIGCHLD;
719 736
720 BUG_ON(leader->exit_state != EXIT_ZOMBIE); 737 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
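
The hunk above open-codes what switch_exec_pids() used to do: under tasklist_lock, the exec'ing thread detaches from its own pid, assumes the dead leader's pid, and re-attaches to the pid, process-group and session hashes. A rough userspace model of that sequence; detach_pid/attach_pid here are toy printf stand-ins, not the kernel functions:

#include <stdio.h>

struct task { int pid; int pgrp; int session; };

/* Toy stand-ins: in the kernel these unhash/rehash the task in the
 * pid hash, and run under write_lock_irq(&tasklist_lock). */
static void detach_pid(struct task *t) { printf("unhash pid %d\n", t->pid); }
static void attach_pid(struct task *t, const char *type, int nr)
{
    printf("hash task %d as %s %d\n", t->pid, type, nr);
}

int main(void)
{
    struct task current = { .pid = 1042, .pgrp = 1000, .session = 900 };
    int leader_pid = 1000;   /* the dying group leader's pid */

    /* Same order as the patch: steal the leader's pid, then rehash. */
    detach_pid(&current);
    current.pid = leader_pid;
    attach_pid(&current, "PIDTYPE_PID", current.pid);
    attach_pid(&current, "PIDTYPE_PGID", current.pgrp);
    attach_pid(&current, "PIDTYPE_SID", current.session);
    return 0;
}
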
@@ -751,7 +768,6 @@ no_thread_group:
751 /* 768 /*
752 * Move our state over to newsighand and switch it in. 769 * Move our state over to newsighand and switch it in.
753 */ 770 */
754 spin_lock_init(&newsighand->siglock);
755 atomic_set(&newsighand->count, 1); 771 atomic_set(&newsighand->count, 1);
756 memcpy(newsighand->action, oldsighand->action, 772 memcpy(newsighand->action, oldsighand->action,
757 sizeof(newsighand->action)); 773 sizeof(newsighand->action));
@@ -768,7 +784,7 @@ no_thread_group:
768 write_unlock_irq(&tasklist_lock); 784 write_unlock_irq(&tasklist_lock);
769 785
770 if (atomic_dec_and_test(&oldsighand->count)) 786 if (atomic_dec_and_test(&oldsighand->count))
771 sighand_free(oldsighand); 787 kmem_cache_free(sighand_cachep, oldsighand);
772 } 788 }
773 789
774 BUG_ON(!thread_group_leader(current)); 790 BUG_ON(!thread_group_leader(current));
diff --git a/fs/xfs/linux-2.6/mrlock.h b/fs/xfs/linux-2.6/mrlock.h
index 16b44c3c2362..1b262b790d9c 100644
--- a/fs/xfs/linux-2.6/mrlock.h
+++ b/fs/xfs/linux-2.6/mrlock.h
@@ -79,7 +79,7 @@ static inline void mrdemote(mrlock_t *mrp)
79 * Debug-only routine, without some platform-specific asm code, we can 79 * Debug-only routine, without some platform-specific asm code, we can
80 * now only answer requests regarding whether we hold the lock for write 80 * now only answer requests regarding whether we hold the lock for write
81 * (reader state is outside our visibility, we only track writer state). 81 * (reader state is outside our visibility, we only track writer state).
82 * Note: means !ismrlocked would give false positivies, so don't do that. 82 * Note: means !ismrlocked would give false positives, so don't do that.
83 */ 83 */
84static inline int ismrlocked(mrlock_t *mrp, int type) 84static inline int ismrlocked(mrlock_t *mrp, int type)
85{ 85{
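
For the comment corrected above: the debug helper tracks only writer state, so a positive query ("held for update?") is reliable, while the negation ("not held at all?") is not, because readers are invisible to it. A small model of why, assuming a toy mrlock with nothing but a writer flag (MR_ACCESS/MR_UPDATE mirror the XFS request types):

#include <stdio.h>

struct mrlock { int mr_writer; };   /* readers are not tracked at all */

#define MR_ACCESS 1
#define MR_UPDATE 2

static int ismrlocked(struct mrlock *mrp, int type)
{
    if (type == MR_UPDATE)
        return mrp->mr_writer;      /* reliable: we track writers */
    return 1;                       /* readers unknown: must assume held */
}

int main(void)
{
    struct mrlock l = { 0 };
    /* A reader may hold l right now, yet mr_writer is 0, so
     * !ismrlocked(&l, MR_UPDATE) would wrongly "prove" it unheld. */
    printf("write-held: %d, maybe-read-held: %d\n",
           ismrlocked(&l, MR_UPDATE), ismrlocked(&l, MR_ACCESS));
    return 0;
}
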
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c02f7c5b7462..6cbbd165c60d 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -372,7 +372,7 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
372 * assumes that all buffers on the page are started at the same time. 372 * assumes that all buffers on the page are started at the same time.
373 * 373 *
374 * The fix is two passes across the ioend list - one to start writeback on the 374 * The fix is two passes across the ioend list - one to start writeback on the
375 * bufferheads, and then the second one submit them for I/O. 375 * buffer_heads, and then submit them for I/O on the second pass.
376 */ 376 */
377STATIC void 377STATIC void
378xfs_submit_ioend( 378xfs_submit_ioend(
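
The rewritten comment above describes the submission discipline: walk the ioend list once to put every buffer into writeback state, then walk it again to issue the I/O, so a quick completion of the first buffer cannot observe (and prematurely end writeback on) a page whose other buffers were not started yet. In sketch form, with invented mark_writeback/submit_io helpers:

#include <stddef.h>
#include <stdio.h>

struct buf { int writeback; };

static void mark_writeback(struct buf *b) { b->writeback = 1; }
static void submit_io(struct buf *b) { printf("submit buf %p\n", (void *)b); }

/* Two passes: if we submitted inside the first loop, a fast completion
 * could run before later buffers were marked and end writeback early. */
static void submit_all(struct buf *bufs, size_t n)
{
    size_t i;

    for (i = 0; i < n; i++)          /* pass 1: state first */
        mark_writeback(&bufs[i]);
    for (i = 0; i < n; i++)          /* pass 2: then the I/O */
        submit_io(&bufs[i]);
}

int main(void)
{
    struct buf bufs[3] = { {0}, {0}, {0} };
    submit_all(bufs, 3);
    return 0;
}
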
@@ -699,7 +699,7 @@ xfs_convert_page(
699 699
700 /* 700 /*
701 * page_dirty is initially a count of buffers on the page before 701 * page_dirty is initially a count of buffers on the page before
702 * EOF and is decrememted as we move each into a cleanable state. 702 * EOF and is decremented as we move each into a cleanable state.
703 * 703 *
704 * Derivation: 704 * Derivation:
705 * 705 *
@@ -842,7 +842,7 @@ xfs_cluster_write(
842 * page if possible. 842 * page if possible.
843 * The bh->b_state's cannot know if any of the blocks or which block for 843 * The bh->b_state's cannot know if any of the blocks or which block for
844 * that matter are dirty due to mmap writes, and therefore bh uptodate is 844 * that matter are dirty due to mmap writes, and therefore bh uptodate is
845 * only vaild if the page itself isn't completely uptodate. Some layers 845 * only valid if the page itself isn't completely uptodate. Some layers
846 * may clear the page dirty flag prior to calling write page, under the 846 * may clear the page dirty flag prior to calling write page, under the
847 * assumption the entire page will be written out; by not writing out the 847 * assumption the entire page will be written out; by not writing out the
848 * whole page the page can be reused before all valid dirty data is 848 * whole page the page can be reused before all valid dirty data is
@@ -892,7 +892,7 @@ xfs_page_state_convert(
892 892
893 /* 893 /*
894 * page_dirty is initially a count of buffers on the page before 894 * page_dirty is initially a count of buffers on the page before
895 * EOF and is decrememted as we move each into a cleanable state. 895 * EOF and is decremented as we move each into a cleanable state.
896 * 896 *
897 * Derivation: 897 * Derivation:
898 * 898 *
@@ -1223,10 +1223,9 @@ free_buffers:
1223} 1223}
1224 1224
1225STATIC int 1225STATIC int
1226__xfs_get_block( 1226__xfs_get_blocks(
1227 struct inode *inode, 1227 struct inode *inode,
1228 sector_t iblock, 1228 sector_t iblock,
1229 unsigned long blocks,
1230 struct buffer_head *bh_result, 1229 struct buffer_head *bh_result,
1231 int create, 1230 int create,
1232 int direct, 1231 int direct,
@@ -1236,22 +1235,17 @@ __xfs_get_block(
1236 xfs_iomap_t iomap; 1235 xfs_iomap_t iomap;
1237 xfs_off_t offset; 1236 xfs_off_t offset;
1238 ssize_t size; 1237 ssize_t size;
1239 int retpbbm = 1; 1238 int niomap = 1;
1240 int error; 1239 int error;
1241 1240
1242 offset = (xfs_off_t)iblock << inode->i_blkbits; 1241 offset = (xfs_off_t)iblock << inode->i_blkbits;
1243 if (blocks) 1242 ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
1244 size = (ssize_t) min_t(xfs_off_t, LONG_MAX, 1243 size = bh_result->b_size;
1245 (xfs_off_t)blocks << inode->i_blkbits);
1246 else
1247 size = 1 << inode->i_blkbits;
1248
1249 VOP_BMAP(vp, offset, size, 1244 VOP_BMAP(vp, offset, size,
1250 create ? flags : BMAPI_READ, &iomap, &retpbbm, error); 1245 create ? flags : BMAPI_READ, &iomap, &niomap, error);
1251 if (error) 1246 if (error)
1252 return -error; 1247 return -error;
1253 1248 if (niomap == 0)
1254 if (retpbbm == 0)
1255 return 0; 1249 return 0;
1256 1250
1257 if (iomap.iomap_bn != IOMAP_DADDR_NULL) { 1251 if (iomap.iomap_bn != IOMAP_DADDR_NULL) {
@@ -1271,12 +1265,16 @@ __xfs_get_block(
1271 } 1265 }
1272 } 1266 }
1273 1267
1274 /* If this is a realtime file, data might be on a new device */ 1268 /*
1269 * If this is a realtime file, data may be on a different device.
1270 * to that pointed to from the buffer_head b_bdev currently.
1271 */
1275 bh_result->b_bdev = iomap.iomap_target->bt_bdev; 1272 bh_result->b_bdev = iomap.iomap_target->bt_bdev;
1276 1273
1277 /* If we previously allocated a block out beyond eof and 1274 /*
1278 * we are now coming back to use it then we will need to 1275 * If we previously allocated a block out beyond eof and we are
1279 * flag it as new even if it has a disk address. 1276 * now coming back to use it then we will need to flag it as new
1277 * even if it has a disk address.
1280 */ 1278 */
1281 if (create && 1279 if (create &&
1282 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) || 1280 ((!buffer_mapped(bh_result) && !buffer_uptodate(bh_result)) ||
@@ -1292,26 +1290,24 @@ __xfs_get_block(
1292 } 1290 }
1293 } 1291 }
1294 1292
1295 if (blocks) { 1293 if (direct || size > (1 << inode->i_blkbits)) {
1296 ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0); 1294 ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0);
1297 offset = min_t(xfs_off_t, 1295 offset = min_t(xfs_off_t,
1298 iomap.iomap_bsize - iomap.iomap_delta, 1296 iomap.iomap_bsize - iomap.iomap_delta, size);
1299 (xfs_off_t)blocks << inode->i_blkbits); 1297 bh_result->b_size = (ssize_t)min_t(xfs_off_t, LONG_MAX, offset);
1300 bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset);
1301 } 1298 }
1302 1299
1303 return 0; 1300 return 0;
1304} 1301}
1305 1302
1306int 1303int
1307xfs_get_block( 1304xfs_get_blocks(
1308 struct inode *inode, 1305 struct inode *inode,
1309 sector_t iblock, 1306 sector_t iblock,
1310 struct buffer_head *bh_result, 1307 struct buffer_head *bh_result,
1311 int create) 1308 int create)
1312{ 1309{
1313 return __xfs_get_block(inode, iblock, 1310 return __xfs_get_blocks(inode, iblock,
1314 bh_result->b_size >> inode->i_blkbits,
1315 bh_result, create, 0, BMAPI_WRITE); 1311 bh_result, create, 0, BMAPI_WRITE);
1316} 1312}
1317 1313
@@ -1322,8 +1318,7 @@ xfs_get_blocks_direct(
1322 struct buffer_head *bh_result, 1318 struct buffer_head *bh_result,
1323 int create) 1319 int create)
1324{ 1320{
1325 return __xfs_get_block(inode, iblock, 1321 return __xfs_get_blocks(inode, iblock,
1326 bh_result->b_size >> inode->i_blkbits,
1327 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); 1322 bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT);
1328} 1323}
1329 1324
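
The clamp rewritten above sizes the returned mapping as the smaller of what remains in the located extent (iomap_bsize - iomap_delta) and the caller's requested b_size, now capped at LONG_MAX rather than UINT_MAX. The arithmetic, reduced to a toy helper with example numbers:

#include <stdio.h>

static long long map_size(long long bsize, long long delta,
                          long long requested)
{
    long long avail = bsize - delta;      /* bytes left in this extent */
    return avail < requested ? avail : requested;
}

int main(void)
{
    /* 1 MiB extent, 256 KiB already consumed, 512 KiB requested. */
    printf("%lld bytes mapped\n", map_size(1 << 20, 256 << 10, 512 << 10));
    return 0;
}
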
@@ -1339,9 +1334,9 @@ xfs_end_io_direct(
1339 /* 1334 /*
1340 * Non-NULL private data means we need to issue a transaction to 1335 * Non-NULL private data means we need to issue a transaction to
1341 * convert a range from unwritten to written extents. This needs 1336 * convert a range from unwritten to written extents. This needs
1342 * to happen from process contect but aio+dio I/O completion 1337 * to happen from process context but aio+dio I/O completion
1343 * happens from irq context so we need to defer it to a workqueue. 1338 * happens from irq context so we need to defer it to a workqueue.
1344 * This is not nessecary for synchronous direct I/O, but we do 1339 * This is not necessary for synchronous direct I/O, but we do
1345 * it anyway to keep the code uniform and simpler. 1340 * it anyway to keep the code uniform and simpler.
1346 * 1341 *
1347 * The core direct I/O code might be changed to always call the 1342 * The core direct I/O code might be changed to always call the
@@ -1358,7 +1353,7 @@ xfs_end_io_direct(
1358 } 1353 }
1359 1354
1360 /* 1355 /*
1361 * blockdev_direct_IO can return an error even afer the I/O 1356 * blockdev_direct_IO can return an error even after the I/O
1362 * completion handler was called. Thus we need to protect 1357 * completion handler was called. Thus we need to protect
1363 * against double-freeing. 1358 * against double-freeing.
1364 */ 1359 */
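
The comment fixed above records a structural constraint: aio+dio completion runs in irq context, where the unwritten-to-written extent conversion (a transaction) cannot run, so the work is queued for process context. A minimal userspace sketch of that hand-off shape; the one-slot queue and names are illustrative, not the kernel workqueue API:

#include <stdio.h>

/* Illustrative stand-in for struct work_struct + schedule_work(). */
struct work { void (*fn)(void *); void *arg; };

static struct work pending;          /* one-slot "queue" for the sketch */

static void schedule_work_sketch(struct work w) { pending = w; }

static void convert_unwritten(void *arg)
{
    printf("process context: converting extents for %s\n", (const char *)arg);
}

/* Runs in "irq context": must not block, so it only queues the work. */
static void io_complete(void)
{
    struct work w = { convert_unwritten, "inode 42" };
    schedule_work_sketch(w);
}

int main(void)
{
    io_complete();                   /* irq side: defer */
    pending.fn(pending.arg);         /* later, the worker runs it */
    return 0;
}
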
@@ -1405,7 +1400,7 @@ xfs_vm_prepare_write(
1405 unsigned int from, 1400 unsigned int from,
1406 unsigned int to) 1401 unsigned int to)
1407{ 1402{
1408 return block_prepare_write(page, from, to, xfs_get_block); 1403 return block_prepare_write(page, from, to, xfs_get_blocks);
1409} 1404}
1410 1405
1411STATIC sector_t 1406STATIC sector_t
@@ -1422,7 +1417,7 @@ xfs_vm_bmap(
1422 VOP_RWLOCK(vp, VRWLOCK_READ); 1417 VOP_RWLOCK(vp, VRWLOCK_READ);
1423 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error); 1418 VOP_FLUSH_PAGES(vp, (xfs_off_t)0, -1, 0, FI_REMAPF, error);
1424 VOP_RWUNLOCK(vp, VRWLOCK_READ); 1419 VOP_RWUNLOCK(vp, VRWLOCK_READ);
1425 return generic_block_bmap(mapping, block, xfs_get_block); 1420 return generic_block_bmap(mapping, block, xfs_get_blocks);
1426} 1421}
1427 1422
1428STATIC int 1423STATIC int
@@ -1430,7 +1425,7 @@ xfs_vm_readpage(
1430 struct file *unused, 1425 struct file *unused,
1431 struct page *page) 1426 struct page *page)
1432{ 1427{
1433 return mpage_readpage(page, xfs_get_block); 1428 return mpage_readpage(page, xfs_get_blocks);
1434} 1429}
1435 1430
1436STATIC int 1431STATIC int
@@ -1440,7 +1435,7 @@ xfs_vm_readpages(
1440 struct list_head *pages, 1435 struct list_head *pages,
1441 unsigned nr_pages) 1436 unsigned nr_pages)
1442{ 1437{
1443 return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); 1438 return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks);
1444} 1439}
1445 1440
1446STATIC void 1441STATIC void
diff --git a/fs/xfs/linux-2.6/xfs_aops.h b/fs/xfs/linux-2.6/xfs_aops.h
index 795699f121d2..60716543c68b 100644
--- a/fs/xfs/linux-2.6/xfs_aops.h
+++ b/fs/xfs/linux-2.6/xfs_aops.h
@@ -41,6 +41,6 @@ typedef struct xfs_ioend {
41} xfs_ioend_t; 41} xfs_ioend_t;
42 42
43extern struct address_space_operations xfs_address_space_operations; 43extern struct address_space_operations xfs_address_space_operations;
44extern int xfs_get_block(struct inode *, sector_t, struct buffer_head *, int); 44extern int xfs_get_blocks(struct inode *, sector_t, struct buffer_head *, int);
45 45
46#endif /* __XFS_IOPS_H__ */ 46#endif /* __XFS_IOPS_H__ */
diff --git a/fs/xfs/linux-2.6/xfs_export.h b/fs/xfs/linux-2.6/xfs_export.h
index e5b0559700a4..e794ca4efc76 100644
--- a/fs/xfs/linux-2.6/xfs_export.h
+++ b/fs/xfs/linux-2.6/xfs_export.h
@@ -54,7 +54,7 @@
54 * Note, the NFS filehandle also includes an fsid portion which 54 * Note, the NFS filehandle also includes an fsid portion which
55 * may have an inode number in it. That number is hardcoded to 55 * may have an inode number in it. That number is hardcoded to
56 * 32bits and there is no way for XFS to intercept it. In 56 * 32bits and there is no way for XFS to intercept it. In
57 * practice this means when exporting an XFS filesytem with 64bit 57 * practice this means when exporting an XFS filesystem with 64bit
58 * inodes you should either export the mountpoint (rather than 58 * inodes you should either export the mountpoint (rather than
59 * a subdirectory) or use the "fsid" export option. 59 * a subdirectory) or use the "fsid" export option.
60 */ 60 */
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index b6321abd9a81..251bfe451a3f 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -72,7 +72,7 @@ xfs_ioctl32_flock(
72 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) || 72 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
73 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32))) 73 copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
74 return -EFAULT; 74 return -EFAULT;
75 75
76 return (unsigned long)p; 76 return (unsigned long)p;
77} 77}
78 78
@@ -107,11 +107,15 @@ xfs_ioctl32_bulkstat(
107#endif 107#endif
108 108
109STATIC long 109STATIC long
110xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg) 110xfs_compat_ioctl(
111 int mode,
112 struct file *file,
113 unsigned cmd,
114 unsigned long arg)
111{ 115{
116 struct inode *inode = file->f_dentry->d_inode;
117 vnode_t *vp = vn_from_inode(inode);
112 int error; 118 int error;
113 struct inode *inode = f->f_dentry->d_inode;
114 vnode_t *vp = vn_to_inode(inode);
115 119
116 switch (cmd) { 120 switch (cmd) {
117 case XFS_IOC_DIOINFO: 121 case XFS_IOC_DIOINFO:
@@ -189,7 +193,7 @@ xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
189 return -ENOIOCTLCMD; 193 return -ENOIOCTLCMD;
190 } 194 }
191 195
192 VOP_IOCTL(vp, inode, f, mode, cmd, (void __user *)arg, error); 196 VOP_IOCTL(vp, inode, file, mode, cmd, (void __user *)arg, error);
193 VMODIFY(vp); 197 VMODIFY(vp);
194 198
195 return error; 199 return error;
@@ -197,18 +201,18 @@ xfs_compat_ioctl(int mode, struct file *f, unsigned cmd, unsigned long arg)
197 201
198long 202long
199xfs_file_compat_ioctl( 203xfs_file_compat_ioctl(
200 struct file *f, 204 struct file *file,
201 unsigned cmd, 205 unsigned cmd,
202 unsigned long arg) 206 unsigned long arg)
203{ 207{
204 return xfs_compat_ioctl(0, f, cmd, arg); 208 return xfs_compat_ioctl(0, file, cmd, arg);
205} 209}
206 210
207long 211long
208xfs_file_compat_invis_ioctl( 212xfs_file_compat_invis_ioctl(
209 struct file *f, 213 struct file *file,
210 unsigned cmd, 214 unsigned cmd,
211 unsigned long arg) 215 unsigned long arg)
212{ 216{
213 return xfs_compat_ioctl(IO_INVIS, f, cmd, arg); 217 return xfs_compat_ioctl(IO_INVIS, file, cmd, arg);
214} 218}
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index af487437bd7e..149237304fb6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -708,7 +708,7 @@ STATIC void
708xfs_vn_truncate( 708xfs_vn_truncate(
709 struct inode *inode) 709 struct inode *inode)
710{ 710{
711 block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_block); 711 block_truncate_page(inode->i_mapping, inode->i_size, xfs_get_blocks);
712} 712}
713 713
714STATIC int 714STATIC int
diff --git a/fs/xfs/linux-2.6/xfs_lrw.c b/fs/xfs/linux-2.6/xfs_lrw.c
index 0169360475c4..84ddf1893894 100644
--- a/fs/xfs/linux-2.6/xfs_lrw.c
+++ b/fs/xfs/linux-2.6/xfs_lrw.c
@@ -681,7 +681,7 @@ start:
681 eventsent = 1; 681 eventsent = 1;
682 682
683 /* 683 /*
684 * The iolock was dropped and reaquired in XFS_SEND_DATA 684 * The iolock was dropped and reacquired in XFS_SEND_DATA
685 * so we have to recheck the size when appending. 685 * so we have to recheck the size when appending.
686 * We will only "goto start;" once, since having sent the 686 * We will only "goto start;" once, since having sent the
687 * event prevents another call to XFS_SEND_DATA, which is 687 * event prevents another call to XFS_SEND_DATA, which is
diff --git a/fs/xfs/linux-2.6/xfs_vfs.h b/fs/xfs/linux-2.6/xfs_vfs.h
index 8fed356db055..841200c03092 100644
--- a/fs/xfs/linux-2.6/xfs_vfs.h
+++ b/fs/xfs/linux-2.6/xfs_vfs.h
@@ -92,7 +92,7 @@ typedef enum {
92#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */ 92#define SYNC_FSDATA 0x0020 /* flush fs data (e.g. superblocks) */
93#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */ 93#define SYNC_REFCACHE 0x0040 /* prune some of the nfs ref cache */
94#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */ 94#define SYNC_REMOUNT 0x0080 /* remount readonly, no dummy LRs */
95#define SYNC_QUIESCE 0x0100 /* quiesce fileystem for a snapshot */ 95#define SYNC_QUIESCE 0x0100 /* quiesce filesystem for a snapshot */
96 96
97typedef int (*vfs_mount_t)(bhv_desc_t *, 97typedef int (*vfs_mount_t)(bhv_desc_t *,
98 struct xfs_mount_args *, struct cred *); 98 struct xfs_mount_args *, struct cred *);
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index e4e5f05b841b..546f48af882a 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -221,7 +221,7 @@ xfs_qm_dqunpin_wait(
221 * as possible. 221 * as possible.
222 * 222 *
223 * We must not be holding the AIL_LOCK at this point. Calling incore() to 223 * We must not be holding the AIL_LOCK at this point. Calling incore() to
224 * search the buffercache can be a time consuming thing, and AIL_LOCK is a 224 * search the buffer cache can be a time consuming thing, and AIL_LOCK is a
225 * spinlock. 225 * spinlock.
226 */ 226 */
227STATIC void 227STATIC void
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 1fb757ef3f41..73c1e5e80c07 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -289,7 +289,7 @@ xfs_qm_rele_quotafs_ref(
289 289
290/* 290/*
291 * This is called at mount time from xfs_mountfs to initialize the quotainfo 291 * This is called at mount time from xfs_mountfs to initialize the quotainfo
292 * structure and start the global quotamanager (xfs_Gqm) if it hasn't done 292 * structure and start the global quota manager (xfs_Gqm) if it hasn't done
293 * so already. Note that the superblock has not been read in yet. 293 * so already. Note that the superblock has not been read in yet.
294 */ 294 */
295void 295void
@@ -807,7 +807,7 @@ xfs_qm_dqattach_one(
807 * Given a udquot and gdquot, attach a ptr to the group dquot in the 807 * Given a udquot and gdquot, attach a ptr to the group dquot in the
808 * udquot as a hint for future lookups. The idea sounds simple, but the 808 * udquot as a hint for future lookups. The idea sounds simple, but the
809 * execution isn't, because the udquot might have a group dquot attached 809 * execution isn't, because the udquot might have a group dquot attached
810 * already and getting rid of that gets us into lock ordering contraints. 810 * already and getting rid of that gets us into lock ordering constraints.
811 * The process is complicated more by the fact that the dquots may or may not 811 * The process is complicated more by the fact that the dquots may or may not
812 * be locked on entry. 812 * be locked on entry.
813 */ 813 */
@@ -1094,10 +1094,10 @@ xfs_qm_sync(
1094 } 1094 }
1095 /* 1095 /*
 1096 * If we can't grab the flush lock then if the caller 1096 * If we can't grab the flush lock, the caller
1097 * really wanted us to give this our best shot, 1097 * really wanted us to give this our best shot, so
1098 * see if we can give a push to the buffer before we wait 1098 * see if we can give a push to the buffer before we wait
1099 * on the flush lock. At this point, we know that 1099 * on the flush lock. At this point, we know that
1100 * eventhough the dquot is being flushed, 1100 * even though the dquot is being flushed,
1101 * it has (new) dirty data. 1101 * it has (new) dirty data.
1102 */ 1102 */
1103 xfs_qm_dqflock_pushbuf_wait(dqp); 1103 xfs_qm_dqflock_pushbuf_wait(dqp);
@@ -1491,7 +1491,7 @@ xfs_qm_reset_dqcounts(
1491 /* 1491 /*
1492 * Do a sanity check, and if needed, repair the dqblk. Don't 1492 * Do a sanity check, and if needed, repair the dqblk. Don't
1493 * output any warnings because it's perfectly possible to 1493 * output any warnings because it's perfectly possible to
1494 * find unitialized dquot blks. See comment in xfs_qm_dqcheck. 1494 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
1495 */ 1495 */
1496 (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR, 1496 (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
1497 "xfs_quotacheck"); 1497 "xfs_quotacheck");
@@ -1580,7 +1580,7 @@ xfs_qm_dqiterate(
1580 1580
1581 error = 0; 1581 error = 0;
1582 /* 1582 /*
1583 * This looks racey, but we can't keep an inode lock across a 1583 * This looks racy, but we can't keep an inode lock across a
1584 * trans_reserve. But, this gets called during quotacheck, and that 1584 * trans_reserve. But, this gets called during quotacheck, and that
1585 * happens only at mount time which is single threaded. 1585 * happens only at mount time which is single threaded.
1586 */ 1586 */
@@ -1824,7 +1824,7 @@ xfs_qm_dqusage_adjust(
1824 * we have to start from the beginning anyway. 1824 * we have to start from the beginning anyway.
1825 * Once we're done, we'll log all the dquot bufs. 1825 * Once we're done, we'll log all the dquot bufs.
1826 * 1826 *
1827 * The *QUOTA_ON checks below may look pretty racey, but quotachecks 1827 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1828 * and quotaoffs don't race. (Quotachecks happen at mount time only). 1828 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1829 */ 1829 */
1830 if (XFS_IS_UQUOTA_ON(mp)) { 1830 if (XFS_IS_UQUOTA_ON(mp)) {
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 676884394aae..c55db463bbf2 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -912,7 +912,7 @@ xfs_qm_export_dquot(
912 912
913 /* 913 /*
914 * Internally, we don't reset all the timers when quota enforcement 914 * Internally, we don't reset all the timers when quota enforcement
915 * gets turned off. No need to confuse the userlevel code, 915 * gets turned off. No need to confuse the user level code,
916 * so return zeroes in that case. 916 * so return zeroes in that case.
917 */ 917 */
918 if (! XFS_IS_QUOTA_ENFORCED(mp)) { 918 if (! XFS_IS_QUOTA_ENFORCED(mp)) {
diff --git a/fs/xfs/quota/xfs_trans_dquot.c b/fs/xfs/quota/xfs_trans_dquot.c
index 3290975d31f7..d8e131ec0aa8 100644
--- a/fs/xfs/quota/xfs_trans_dquot.c
+++ b/fs/xfs/quota/xfs_trans_dquot.c
@@ -804,7 +804,7 @@ xfs_trans_reserve_quota_bydquots(
804 } 804 }
805 805
806 /* 806 /*
807 * Didnt change anything critical, so, no need to log 807 * Didn't change anything critical, so, no need to log
808 */ 808 */
809 return (0); 809 return (0);
810} 810}
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 4ff0f4e41c61..2539af34eb63 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -395,7 +395,7 @@ xfs_acl_allow_set(
395 * The access control process to determine the access permission: 395 * The access control process to determine the access permission:
396 * if uid == file owner id, use the file owner bits. 396 * if uid == file owner id, use the file owner bits.
397 * if gid == file owner group id, use the file group bits. 397 * if gid == file owner group id, use the file group bits.
398 * scan ACL for a maching user or group, and use matched entry 398 * scan ACL for a matching user or group, and use matched entry
399 * permission. Use total permissions of all matching group entries, 399 * permission. Use total permissions of all matching group entries,
400 * until all acl entries are exhausted. The final permission produced 400 * until all acl entries are exhausted. The final permission produced
401 * by matching acl entry or entries needs to be & with group permission. 401 * by matching acl entry or entries needs to be & with group permission.
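
The comment above spells out the evaluation order: owner bits on a uid match, file group bits on a gid match, otherwise a scan of the ACL in which all matching group entries are OR'd together and the total is masked with the group permission. The accumulation step, as a toy routine (types and names invented; the real xfs_acl code differs):

#include <stdio.h>

struct acl_entry { int is_group; int id; int perm; };

/* OR together every matching group entry, then mask with the group
 * permission: "total permissions ... & with group permission". */
static int group_acl_perm(const struct acl_entry *acl, int n,
                          int gid, int group_perm)
{
    int perm = 0, i;

    for (i = 0; i < n; i++)
        if (acl[i].is_group && acl[i].id == gid)
            perm |= acl[i].perm;
    return perm & group_perm;
}

int main(void)
{
    struct acl_entry acl[] = {
        { 1, 100, 04 },   /* group 100: read */
        { 1, 100, 02 },   /* group 100: write */
        { 1, 200, 01 },   /* some other group */
    };
    printf("perm = %o\n", group_acl_perm(acl, 3, 100, 06));
    return 0;
}
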
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index a96e2ffce0cc..dc2361dd740a 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -179,7 +179,7 @@ typedef struct xfs_perag
179{ 179{
180 char pagf_init; /* this agf's entry is initialized */ 180 char pagf_init; /* this agf's entry is initialized */
181 char pagi_init; /* this agi's entry is initialized */ 181 char pagi_init; /* this agi's entry is initialized */
182 char pagf_metadata; /* the agf is prefered to be metadata */ 182 char pagf_metadata; /* the agf is preferred to be metadata */
183 char pagi_inodeok; /* The agi is ok for inodes */ 183 char pagi_inodeok; /* The agi is ok for inodes */
184 __uint8_t pagf_levels[XFS_BTNUM_AGF]; 184 __uint8_t pagf_levels[XFS_BTNUM_AGF];
185 /* # of levels in bno & cnt btree */ 185 /* # of levels in bno & cnt btree */
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index f4328e1e2a74..64ee07db0d5e 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -511,7 +511,7 @@ STATIC void
511xfs_alloc_trace_busy( 511xfs_alloc_trace_busy(
512 char *name, /* function tag string */ 512 char *name, /* function tag string */
513 char *str, /* additional string */ 513 char *str, /* additional string */
514 xfs_mount_t *mp, /* file system mount poing */ 514 xfs_mount_t *mp, /* file system mount point */
515 xfs_agnumber_t agno, /* allocation group number */ 515 xfs_agnumber_t agno, /* allocation group number */
516 xfs_agblock_t agbno, /* a.g. relative block number */ 516 xfs_agblock_t agbno, /* a.g. relative block number */
517 xfs_extlen_t len, /* length of extent */ 517 xfs_extlen_t len, /* length of extent */
@@ -1843,7 +1843,7 @@ xfs_alloc_fix_freelist(
1843 } else 1843 } else
1844 agbp = NULL; 1844 agbp = NULL;
1845 1845
1846 /* If this is a metadata prefered pag and we are user data 1846 /* If this is a metadata preferred pag and we are user data
1847 * then try somewhere else if we are not being asked to 1847 * then try somewhere else if we are not being asked to
1848 * try harder at this point 1848 * try harder at this point
1849 */ 1849 */
@@ -2458,7 +2458,7 @@ error0:
2458/* 2458/*
2459 * AG Busy list management 2459 * AG Busy list management
2460 * The busy list contains block ranges that have been freed but whose 2460 * The busy list contains block ranges that have been freed but whose
2461 * transacations have not yet hit disk. If any block listed in a busy 2461 * transactions have not yet hit disk. If any block listed in a busy
2462 * list is reused, the transaction that freed it must be forced to disk 2462 * list is reused, the transaction that freed it must be forced to disk
2463 * before continuing to use the block. 2463 * before continuing to use the block.
2464 * 2464 *
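
The corrected comment above states the busy-list invariant: an extent freed by a transaction that has not yet reached disk may only be reused after forcing the log for that transaction. A toy overlap check capturing the rule; free_lsn and lsn_needed are invented names for this sketch, not the XFS busy-list structures:

#include <stdio.h>

struct busy { unsigned long long bno, len, free_lsn; };

/* Reuse rule: if [bno, bno+len) overlaps a busy extent, the log must
 * be forced up to the freeing transaction's LSN before reuse. */
static unsigned long long lsn_needed(const struct busy *list, int n,
                                     unsigned long long bno,
                                     unsigned long long len)
{
    unsigned long long lsn = 0;
    int i;

    for (i = 0; i < n; i++)
        if (bno < list[i].bno + list[i].len && list[i].bno < bno + len)
            if (list[i].free_lsn > lsn)
                lsn = list[i].free_lsn;
    return lsn;   /* 0: reuse freely; else force log to this LSN first */
}

int main(void)
{
    struct busy busy[] = { { 100, 16, 77 } };
    printf("force to lsn %llu\n", lsn_needed(busy, 1, 104, 4));
    return 0;
}
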
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h
index 3546dea27b7d..2d1f8928b267 100644
--- a/fs/xfs/xfs_alloc.h
+++ b/fs/xfs/xfs_alloc.h
@@ -68,7 +68,7 @@ typedef struct xfs_alloc_arg {
68 xfs_alloctype_t otype; /* original allocation type */ 68 xfs_alloctype_t otype; /* original allocation type */
69 char wasdel; /* set if allocation was prev delayed */ 69 char wasdel; /* set if allocation was prev delayed */
70 char wasfromfl; /* set if allocation is from freelist */ 70 char wasfromfl; /* set if allocation is from freelist */
71 char isfl; /* set if is freelist blocks - !actg */ 71 char isfl; /* set if is freelist blocks - !acctg */
72 char userdata; /* set if this is user data */ 72 char userdata; /* set if this is user data */
73} xfs_alloc_arg_t; 73} xfs_alloc_arg_t;
74 74
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index 093fac476bda..b6e1e02bbb28 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -294,7 +294,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
294 xfs_trans_ihold(args.trans, dp); 294 xfs_trans_ihold(args.trans, dp);
295 295
296 /* 296 /*
297 * If the attribute list is non-existant or a shortform list, 297 * If the attribute list is non-existent or a shortform list,
298 * upgrade it to a single-leaf-block attribute list. 298 * upgrade it to a single-leaf-block attribute list.
299 */ 299 */
300 if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) || 300 if ((dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) ||
@@ -1584,7 +1584,7 @@ out:
1584 * Fill in the disk block numbers in the state structure for the buffers 1584 * Fill in the disk block numbers in the state structure for the buffers
1585 * that are attached to the state structure. 1585 * that are attached to the state structure.
1586 * This is done so that we can quickly reattach ourselves to those buffers 1586 * This is done so that we can quickly reattach ourselves to those buffers
1587 * after some set of transaction commit's has released these buffers. 1587 * after some set of transaction commits have released these buffers.
1588 */ 1588 */
1589STATIC int 1589STATIC int
1590xfs_attr_fillstate(xfs_da_state_t *state) 1590xfs_attr_fillstate(xfs_da_state_t *state)
@@ -1631,7 +1631,7 @@ xfs_attr_fillstate(xfs_da_state_t *state)
1631/* 1631/*
1632 * Reattach the buffers to the state structure based on the disk block 1632 * Reattach the buffers to the state structure based on the disk block
1633 * numbers stored in the state structure. 1633 * numbers stored in the state structure.
1634 * This is done after some set of transaction commit's has released those 1634 * This is done after some set of transaction commits have released those
1635 * buffers from our grip. 1635 * buffers from our grip.
1636 */ 1636 */
1637STATIC int 1637STATIC int
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 717682747bd2..9462be86aa14 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -524,7 +524,7 @@ xfs_attr_shortform_compare(const void *a, const void *b)
524 524
525/* 525/*
526 * Copy out entries of shortform attribute lists for attr_list(). 526 * Copy out entries of shortform attribute lists for attr_list().
527 * Shortform atrtribute lists are not stored in hashval sorted order. 527 * Shortform attribute lists are not stored in hashval sorted order.
528 * If the output buffer is not large enough to hold them all, then we 528 * If the output buffer is not large enough to hold them all, then we
529 * we have to calculate each entries' hashvalue and sort them before 529 * we have to calculate each entries' hashvalue and sort them before
530 * we can begin returning them to the user. 530 * we can begin returning them to the user.
@@ -1541,7 +1541,7 @@ xfs_attr_leaf_toosmall(xfs_da_state_t *state, int *action)
1541 /* 1541 /*
1542 * Check for the degenerate case of the block being empty. 1542 * Check for the degenerate case of the block being empty.
1543 * If the block is empty, we'll simply delete it, no need to 1543 * If the block is empty, we'll simply delete it, no need to
1544 * coalesce it with a sibling block. We choose (aribtrarily) 1544 * coalesce it with a sibling block. We choose (arbitrarily)
1545 * to merge with the forward block unless it is NULL. 1545 * to merge with the forward block unless it is NULL.
1546 */ 1546 */
1547 if (count == 0) { 1547 if (count == 0) {
diff --git a/fs/xfs/xfs_behavior.c b/fs/xfs/xfs_behavior.c
index 9880adae3938..f4fe3715a803 100644
--- a/fs/xfs/xfs_behavior.c
+++ b/fs/xfs/xfs_behavior.c
@@ -31,7 +31,7 @@
31 * The behavior chain is ordered based on the 'position' number which 31 * The behavior chain is ordered based on the 'position' number which
32 * lives in the first field of the ops vector (higher numbers first). 32 * lives in the first field of the ops vector (higher numbers first).
33 * 33 *
34 * Attemps to insert duplicate ops result in an EINVAL return code. 34 * Attempts to insert duplicate ops result in an EINVAL return code.
35 * Otherwise, return 0 to indicate success. 35 * Otherwise, return 0 to indicate success.
36 */ 36 */
37int 37int
@@ -84,7 +84,7 @@ bhv_insert(bhv_head_t *bhp, bhv_desc_t *bdp)
84 84
85/* 85/*
86 * Remove a behavior descriptor from a position in a behavior chain; 86 * Remove a behavior descriptor from a position in a behavior chain;
87 * the postition is guaranteed not to be the first position. 87 * the position is guaranteed not to be the first position.
88 * Should only be called by the bhv_remove() macro. 88 * Should only be called by the bhv_remove() macro.
89 */ 89 */
90void 90void
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
index 2cd89bb5ab10..1d8ff103201c 100644
--- a/fs/xfs/xfs_behavior.h
+++ b/fs/xfs/xfs_behavior.h
@@ -39,7 +39,7 @@
39 * behaviors is synchronized with operations-in-progress (oip's) so that 39 * behaviors is synchronized with operations-in-progress (oip's) so that
40 * the oip's always see a consistent view of the chain. 40 * the oip's always see a consistent view of the chain.
41 * 41 *
42 * The term "interpostion" is used to refer to the act of inserting 42 * The term "interposition" is used to refer to the act of inserting
43 * a behavior such that it interposes on (i.e., is inserted in front 43 * a behavior such that it interposes on (i.e., is inserted in front
44 * of) a particular other behavior. A key example of this is when a 44 * of) a particular other behavior. A key example of this is when a
45 * system implementing distributed single system image wishes to 45 * system implementing distributed single system image wishes to
@@ -51,7 +51,7 @@
51 * 51 *
52 * Behavior synchronization is logic which is necessary under certain 52 * Behavior synchronization is logic which is necessary under certain
53 * circumstances that there is no conflict between ongoing operations 53 * circumstances that there is no conflict between ongoing operations
54 * traversing the behavior chain and those dunamically modifying the 54 * traversing the behavior chain and those dynamically modifying the
55 * behavior chain. Because behavior synchronization adds extra overhead 55 * behavior chain. Because behavior synchronization adds extra overhead
56 * to virtual operation invocation, we want to restrict, as much as 56 * to virtual operation invocation, we want to restrict, as much as
57 * we can, the requirement for this extra code, to those situations 57 * we can, the requirement for this extra code, to those situations
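
Both behavior-chain comments above reference the same insertion rule: descriptors are kept in descending 'position' order, and inserting a duplicate position fails with EINVAL. A generic singly-linked rendering of that rule, independent of the bhv_* types:

#include <errno.h>
#include <stdio.h>

struct bhv { int position; struct bhv *next; };

/* Insert keeping descending position order; equal positions collide. */
static int bhv_insert_sketch(struct bhv **head, struct bhv *bdp)
{
    struct bhv **pp = head;

    while (*pp && (*pp)->position > bdp->position)
        pp = &(*pp)->next;
    if (*pp && (*pp)->position == bdp->position)
        return EINVAL;               /* duplicate position: reject */
    bdp->next = *pp;
    *pp = bdp;
    return 0;
}

int main(void)
{
    struct bhv *head = NULL, a = { 30 }, b = { 10 }, c = { 20 };

    bhv_insert_sketch(&head, &a);
    bhv_insert_sketch(&head, &b);
    bhv_insert_sketch(&head, &c);
    for (struct bhv *p = head; p; p = p->next)
        printf("%d ", p->position);  /* prints: 30 20 10 */
    printf("\n");
    return 0;
}
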
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 2d702e4a74a3..d384e489705f 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -3467,113 +3467,6 @@ done:
3467 return error; 3467 return error;
3468} 3468}
3469 3469
3470xfs_bmbt_rec_t * /* pointer to found extent entry */
3471xfs_bmap_do_search_extents(
3472 xfs_bmbt_rec_t *base, /* base of extent list */
3473 xfs_extnum_t lastx, /* last extent index used */
3474 xfs_extnum_t nextents, /* number of file extents */
3475 xfs_fileoff_t bno, /* block number searched for */
3476 int *eofp, /* out: end of file found */
3477 xfs_extnum_t *lastxp, /* out: last extent index */
3478 xfs_bmbt_irec_t *gotp, /* out: extent entry found */
3479 xfs_bmbt_irec_t *prevp) /* out: previous extent entry found */
3480{
3481 xfs_bmbt_rec_t *ep; /* extent list entry pointer */
3482 xfs_bmbt_irec_t got; /* extent list entry, decoded */
3483 int high; /* high index of binary search */
3484 int low; /* low index of binary search */
3485
3486 /*
3487 * Initialize the extent entry structure to catch access to
3488 * uninitialized br_startblock field.
3489 */
3490 got.br_startoff = 0xffa5a5a5a5a5a5a5LL;
3491 got.br_blockcount = 0xa55a5a5a5a5a5a5aLL;
3492 got.br_state = XFS_EXT_INVALID;
3493
3494#if XFS_BIG_BLKNOS
3495 got.br_startblock = 0xffffa5a5a5a5a5a5LL;
3496#else
3497 got.br_startblock = 0xffffa5a5;
3498#endif
3499
3500 if (lastx != NULLEXTNUM && lastx < nextents)
3501 ep = base + lastx;
3502 else
3503 ep = NULL;
3504 prevp->br_startoff = NULLFILEOFF;
3505 if (ep && bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep)) &&
3506 bno < got.br_startoff +
3507 (got.br_blockcount = xfs_bmbt_get_blockcount(ep)))
3508 *eofp = 0;
3509 else if (ep && lastx < nextents - 1 &&
3510 bno >= (got.br_startoff = xfs_bmbt_get_startoff(ep + 1)) &&
3511 bno < got.br_startoff +
3512 (got.br_blockcount = xfs_bmbt_get_blockcount(ep + 1))) {
3513 lastx++;
3514 ep++;
3515 *eofp = 0;
3516 } else if (nextents == 0)
3517 *eofp = 1;
3518 else if (bno == 0 &&
3519 (got.br_startoff = xfs_bmbt_get_startoff(base)) == 0) {
3520 ep = base;
3521 lastx = 0;
3522 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3523 *eofp = 0;
3524 } else {
3525 low = 0;
3526 high = nextents - 1;
3527 /* binary search the extents array */
3528 while (low <= high) {
3529 XFS_STATS_INC(xs_cmp_exlist);
3530 lastx = (low + high) >> 1;
3531 ep = base + lastx;
3532 got.br_startoff = xfs_bmbt_get_startoff(ep);
3533 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3534 if (bno < got.br_startoff)
3535 high = lastx - 1;
3536 else if (bno >= got.br_startoff + got.br_blockcount)
3537 low = lastx + 1;
3538 else {
3539 got.br_startblock = xfs_bmbt_get_startblock(ep);
3540 got.br_state = xfs_bmbt_get_state(ep);
3541 *eofp = 0;
3542 *lastxp = lastx;
3543 *gotp = got;
3544 return ep;
3545 }
3546 }
3547 if (bno >= got.br_startoff + got.br_blockcount) {
3548 lastx++;
3549 if (lastx == nextents) {
3550 *eofp = 1;
3551 got.br_startblock = xfs_bmbt_get_startblock(ep);
3552 got.br_state = xfs_bmbt_get_state(ep);
3553 *prevp = got;
3554 ep = NULL;
3555 } else {
3556 *eofp = 0;
3557 xfs_bmbt_get_all(ep, prevp);
3558 ep++;
3559 got.br_startoff = xfs_bmbt_get_startoff(ep);
3560 got.br_blockcount = xfs_bmbt_get_blockcount(ep);
3561 }
3562 } else {
3563 *eofp = 0;
3564 if (ep > base)
3565 xfs_bmbt_get_all(ep - 1, prevp);
3566 }
3567 }
3568 if (ep) {
3569 got.br_startblock = xfs_bmbt_get_startblock(ep);
3570 got.br_state = xfs_bmbt_get_state(ep);
3571 }
3572 *lastxp = lastx;
3573 *gotp = got;
3574 return ep;
3575}
3576
3577/* 3470/*
3578 * Search the extent records for the entry containing block bno. 3471 * Search the extent records for the entry containing block bno.
3579 * If bno lies in a hole, point to the next entry. If bno lies 3472 * If bno lies in a hole, point to the next entry. If bno lies
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 011ccaa9a1c0..f83399c89ce3 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -362,14 +362,6 @@ xfs_bmbt_rec_t *
362xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *, 362xfs_bmap_search_multi_extents(struct xfs_ifork *, xfs_fileoff_t, int *,
363 xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *); 363 xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
364 364
365/*
366 * Search an extent list for the extent which includes block
367 * bno.
368 */
369xfs_bmbt_rec_t *xfs_bmap_do_search_extents(xfs_bmbt_rec_t *,
370 xfs_extnum_t, xfs_extnum_t, xfs_fileoff_t, int *,
371 xfs_extnum_t *, xfs_bmbt_irec_t *, xfs_bmbt_irec_t *);
372
373#endif /* __KERNEL__ */ 365#endif /* __KERNEL__ */
374 366
375#endif /* __XFS_BMAP_H__ */ 367#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 07e2324152b1..5fed15682dda 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -98,12 +98,12 @@ xfs_buf_item_flush_log_debug(
98} 98}
99 99
100/* 100/*
101 * This function is called to verify that our caller's have logged 101 * This function is called to verify that our callers have logged
102 * all the bytes that they changed. 102 * all the bytes that they changed.
103 * 103 *
104 * It does this by comparing the original copy of the buffer stored in 104 * It does this by comparing the original copy of the buffer stored in
105 * the buf log item's bli_orig array to the current copy of the buffer 105 * the buf log item's bli_orig array to the current copy of the buffer
106 * and ensuring that all bytes which miscompare are set in the bli_logged 106 * and ensuring that all bytes which mismatch are set in the bli_logged
107 * array of the buf log item. 107 * array of the buf log item.
108 */ 108 */
109STATIC void 109STATIC void
diff --git a/fs/xfs/xfs_cap.h b/fs/xfs/xfs_cap.h
index 433ec537f9bd..d0035c6e9514 100644
--- a/fs/xfs/xfs_cap.h
+++ b/fs/xfs/xfs_cap.h
@@ -38,7 +38,7 @@ typedef struct xfs_cap_set {
38/* 38/*
39 * For Linux, we take the bitfields directly from capability.h 39 * For Linux, we take the bitfields directly from capability.h
40 * and no longer attempt to keep this attribute ondisk compatible 40 * and no longer attempt to keep this attribute ondisk compatible
41 * with IRIX. Since this attribute is only set on exectuables, 41 * with IRIX. Since this attribute is only set on executables,
42 * it just doesn't make much sense to try. We do use a different 42 * it just doesn't make much sense to try. We do use a different
43 * named attribute though, to avoid confusion. 43 * named attribute though, to avoid confusion.
44 */ 44 */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 4bae3a76c678..8988b9051175 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -840,7 +840,7 @@ xfs_da_node_toosmall(xfs_da_state_t *state, int *action)
840 /* 840 /*
841 * Check for the degenerate case of the block being empty. 841 * Check for the degenerate case of the block being empty.
842 * If the block is empty, we'll simply delete it, no need to 842 * If the block is empty, we'll simply delete it, no need to
843 * coalesce it with a sibling block. We choose (aribtrarily) 843 * coalesce it with a sibling block. We choose (arbitrarily)
844 * to merge with the forward block unless it is NULL. 844 * to merge with the forward block unless it is NULL.
845 */ 845 */
846 if (count == 0) { 846 if (count == 0) {
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index bd5cee6aa51a..972ded595476 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -533,7 +533,7 @@ xfs_dir2_block_getdents(
533 533
534 /* 534 /*
535 * Reached the end of the block. 535 * Reached the end of the block.
536 * Set the offset to a nonexistent block 1 and return. 536 * Set the offset to a non-existent block 1 and return.
537 */ 537 */
538 *eofp = 1; 538 *eofp = 1;
539 539
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 08648b18265c..0f5e2f2ce6ec 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -515,7 +515,7 @@ xfs_dir2_leaf_addname(
515 ASSERT(be32_to_cpu(leaf->ents[highstale].address) == 515 ASSERT(be32_to_cpu(leaf->ents[highstale].address) ==
516 XFS_DIR2_NULL_DATAPTR); 516 XFS_DIR2_NULL_DATAPTR);
517 /* 517 /*
518 * Copy entries down to copver the stale entry 518 * Copy entries down to cover the stale entry
519 * and make room for the new entry. 519 * and make room for the new entry.
520 */ 520 */
521 if (highstale - index > 0) 521 if (highstale - index > 0)
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index af556f16a0c7..ac511ab9c52d 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -830,7 +830,7 @@ xfs_dir2_leafn_rebalance(
830 state->inleaf = 1; 830 state->inleaf = 1;
831 blk2->index = 0; 831 blk2->index = 0;
832 cmn_err(CE_ALERT, 832 cmn_err(CE_ALERT,
833 "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting orignal leaf: " 833 "xfs_dir2_leafn_rebalance: picked the wrong leaf? reverting original leaf: "
834 "blk1->index %d\n", 834 "blk1->index %d\n",
835 blk1->index); 835 blk1->index);
836 } 836 }
diff --git a/fs/xfs/xfs_dir_leaf.c b/fs/xfs/xfs_dir_leaf.c
index ee88751c3be6..6d711869262f 100644
--- a/fs/xfs/xfs_dir_leaf.c
+++ b/fs/xfs/xfs_dir_leaf.c
@@ -1341,7 +1341,7 @@ xfs_dir_leaf_toosmall(xfs_da_state_t *state, int *action)
1341 /* 1341 /*
1342 * Check for the degenerate case of the block being empty. 1342 * Check for the degenerate case of the block being empty.
1343 * If the block is empty, we'll simply delete it, no need to 1343 * If the block is empty, we'll simply delete it, no need to
1344 * coalesce it with a sibling block. We choose (aribtrarily) 1344 * coalesce it with a sibling block. We choose (arbitrarily)
1345 * to merge with the forward block unless it is NULL. 1345 * to merge with the forward block unless it is NULL.
1346 */ 1346 */
1347 if (count == 0) { 1347 if (count == 0) {
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 56caa88713ab..dfa3527b20a7 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -477,7 +477,7 @@ xfs_fs_counts(
477 * 477 *
478 * xfs_reserve_blocks is called to set m_resblks 478 * xfs_reserve_blocks is called to set m_resblks
479 * in the in-core mount table. The number of unused reserved blocks 479 * in the in-core mount table. The number of unused reserved blocks
480 * is kept in m_resbls_avail. 480 * is kept in m_resblks_avail.
481 * 481 *
482 * Reserve the requested number of blocks if available. Otherwise return 482 * Reserve the requested number of blocks if available. Otherwise return
483 * as many as possible to satisfy the request. The actual number 483 * as many as possible to satisfy the request. The actual number
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index 0024892841a3..4eeb856183b1 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -136,7 +136,7 @@ xfs_ialloc_ag_alloc(
136 int ninodes; /* num inodes per buf */ 136 int ninodes; /* num inodes per buf */
137 xfs_agino_t thisino; /* current inode number, for loop */ 137 xfs_agino_t thisino; /* current inode number, for loop */
138 int version; /* inode version number to use */ 138 int version; /* inode version number to use */
139 int isaligned; /* inode allocation at stripe unit */ 139 int isaligned = 0; /* inode allocation at stripe unit */
140 /* boundary */ 140 /* boundary */
141 141
142 args.tp = tp; 142 args.tp = tp;
@@ -152,47 +152,75 @@ xfs_ialloc_ag_alloc(
152 return XFS_ERROR(ENOSPC); 152 return XFS_ERROR(ENOSPC);
153 args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp); 153 args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
154 /* 154 /*
155 * Set the alignment for the allocation. 155 * First try to allocate inodes contiguous with the last-allocated
156 * If stripe alignment is turned on then align at stripe unit 156 * chunk of inodes. If the filesystem is striped, this will fill
157 * boundary. 157 * an entire stripe unit with inodes.
158 * If the cluster size is smaller than a filesystem block 158 */
159 * then we're doing I/O for inodes in filesystem block size pieces,
160 * so don't need alignment anyway.
161 */
162 isaligned = 0;
163 if (args.mp->m_sinoalign) {
164 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
165 args.alignment = args.mp->m_dalign;
166 isaligned = 1;
167 } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
168 args.mp->m_sb.sb_inoalignmt >=
169 XFS_B_TO_FSBT(args.mp, XFS_INODE_CLUSTER_SIZE(args.mp)))
170 args.alignment = args.mp->m_sb.sb_inoalignmt;
171 else
172 args.alignment = 1;
173 agi = XFS_BUF_TO_AGI(agbp); 159 agi = XFS_BUF_TO_AGI(agbp);
174 /* 160 newino = be32_to_cpu(agi->agi_newino);
 175 * Need to figure out where to allocate the inode blocks. 161 if (likely(newino != NULLAGINO)) {
176 * Ideally they should be spaced out through the a.g. 162 args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
177 * For now, just allocate blocks up front. 163 XFS_IALLOC_BLOCKS(args.mp);
178 */ 164 args.fsbno = XFS_AGB_TO_FSB(args.mp,
179 args.agbno = be32_to_cpu(agi->agi_root); 165 be32_to_cpu(agi->agi_seqno), args.agbno);
180 args.fsbno = XFS_AGB_TO_FSB(args.mp, be32_to_cpu(agi->agi_seqno), 166 args.type = XFS_ALLOCTYPE_THIS_BNO;
181 args.agbno); 167 args.mod = args.total = args.wasdel = args.isfl =
182 /* 168 args.userdata = args.minalignslop = 0;
183 * Allocate a fixed-size extent of inodes. 169 args.prod = 1;
184 */ 170 args.alignment = 1;
185 args.type = XFS_ALLOCTYPE_NEAR_BNO; 171 /*
186 args.mod = args.total = args.wasdel = args.isfl = args.userdata = 172 * Allow space for the inode btree to split.
187 args.minalignslop = 0; 173 */
188 args.prod = 1; 174 args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
189 /* 175 if ((error = xfs_alloc_vextent(&args)))
190 * Allow space for the inode btree to split. 176 return error;
191 */ 177 } else
192 args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1; 178 args.fsbno = NULLFSBLOCK;
193 if ((error = xfs_alloc_vextent(&args)))
194 return error;
195 179
180 if (unlikely(args.fsbno == NULLFSBLOCK)) {
181 /*
182 * Set the alignment for the allocation.
183 * If stripe alignment is turned on then align at stripe unit
184 * boundary.
185 * If the cluster size is smaller than a filesystem block
186 * then we're doing I/O for inodes in filesystem block size
187 * pieces, so don't need alignment anyway.
188 */
189 isaligned = 0;
190 if (args.mp->m_sinoalign) {
191 ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
192 args.alignment = args.mp->m_dalign;
193 isaligned = 1;
194 } else if (XFS_SB_VERSION_HASALIGN(&args.mp->m_sb) &&
195 args.mp->m_sb.sb_inoalignmt >=
196 XFS_B_TO_FSBT(args.mp,
197 XFS_INODE_CLUSTER_SIZE(args.mp)))
198 args.alignment = args.mp->m_sb.sb_inoalignmt;
199 else
200 args.alignment = 1;
201 /*
202 * Need to figure out where to allocate the inode blocks.
203 * Ideally they should be spaced out through the a.g.
204 * For now, just allocate blocks up front.
205 */
206 args.agbno = be32_to_cpu(agi->agi_root);
207 args.fsbno = XFS_AGB_TO_FSB(args.mp,
208 be32_to_cpu(agi->agi_seqno), args.agbno);
209 /*
210 * Allocate a fixed-size extent of inodes.
211 */
212 args.type = XFS_ALLOCTYPE_NEAR_BNO;
213 args.mod = args.total = args.wasdel = args.isfl =
214 args.userdata = args.minalignslop = 0;
215 args.prod = 1;
216 /*
217 * Allow space for the inode btree to split.
218 */
219 args.minleft = XFS_IN_MAXLEVELS(args.mp) - 1;
220 if ((error = xfs_alloc_vextent(&args)))
221 return error;
222 }
223
196 /* 224 /*
197 * If stripe alignment is turned on, then try again with cluster 225 * If stripe alignment is turned on, then try again with cluster
198 * alignment. 226 * alignment.
@@ -1023,7 +1051,7 @@ xfs_difree(
1023 rec.ir_freecount++; 1051 rec.ir_freecount++;
1024 1052
1025 /* 1053 /*
1026 * When an inode cluster is free, it becomes elgible for removal 1054 * When an inode cluster is free, it becomes eligible for removal
1027 */ 1055 */
1028 if ((mp->m_flags & XFS_MOUNT_IDELETE) && 1056 if ((mp->m_flags & XFS_MOUNT_IDELETE) &&
1029 (rec.ir_freecount == XFS_IALLOC_INODES(mp))) { 1057 (rec.ir_freecount == XFS_IALLOC_INODES(mp))) {
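
Summarizing the xfs_ialloc.c restructuring above: allocation of a new inode chunk now first attempts exact placement (XFS_ALLOCTYPE_THIS_BNO) directly after the chunk recorded in agi_newino, so striped filesystems tend to fill whole stripe units with inodes, and only falls back to the older near-bno, alignment-aware path when that fails. The two-stage fallback, schematically; alloc_exact/alloc_near are hypothetical stand-ins for the xfs_alloc_vextent calls:

#include <stdio.h>

#define NULLBLOCK (~0ULL)

/* Hypothetical allocator: exact placement succeeds only if the wanted
 * block happens to be free; the near-bno path always finds something. */
static unsigned long long alloc_exact(unsigned long long want)
{
    return (want % 2 == 0) ? want : NULLBLOCK;   /* toy failure mode */
}
static unsigned long long alloc_near(unsigned long long hint)
{
    return hint + 8;
}

static unsigned long long alloc_inode_chunk(unsigned long long newino_bno,
                                            unsigned long long chunk_blocks)
{
    unsigned long long bno = NULLBLOCK;

    if (newino_bno != NULLBLOCK)                 /* stage 1: contiguous */
        bno = alloc_exact(newino_bno + chunk_blocks);
    if (bno == NULLBLOCK)                        /* stage 2: old path */
        bno = alloc_near(/* agi_root */ 1);
    return bno;
}

int main(void)
{
    printf("chunk at block %llu\n", alloc_inode_chunk(64, 4));
    return 0;
}
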
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index 3ce35a6f700b..bb33113eef9f 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -509,7 +509,7 @@ retry:
509 } else { 509 } else {
510 /* 510 /*
511 * If the inode is not fully constructed due to 511 * If the inode is not fully constructed due to
512 * filehandle mistmatches wait for the inode to go 512 * filehandle mismatches wait for the inode to go
513 * away and try again. 513 * away and try again.
514 * 514 *
515 * iget_locked will call __wait_on_freeing_inode 515 * iget_locked will call __wait_on_freeing_inode
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 88a517fad07b..48146bdc6bdd 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -160,7 +160,7 @@ xfs_inotobp(
160 xfs_dinode_t *dip; 160 xfs_dinode_t *dip;
161 161
162 /* 162 /*
163 * Call the space managment code to find the location of the 163 * Call the space management code to find the location of the
164 * inode on disk. 164 * inode on disk.
165 */ 165 */
166 imap.im_blkno = 0; 166 imap.im_blkno = 0;
@@ -837,7 +837,7 @@ xfs_dic2xflags(
837 837
838/* 838/*
839 * Given a mount structure and an inode number, return a pointer 839 * Given a mount structure and an inode number, return a pointer
840 * to a newly allocated in-core inode coresponding to the given 840 * to a newly allocated in-core inode corresponding to the given
841 * inode number. 841 * inode number.
842 * 842 *
843 * Initialize the inode's attributes and extent pointers if it 843 * Initialize the inode's attributes and extent pointers if it
@@ -2723,7 +2723,7 @@ xfs_ipin(
2723/* 2723/*
2724 * Decrement the pin count of the given inode, and wake up 2724 * Decrement the pin count of the given inode, and wake up
2725 * anyone in xfs_iwait_unpin() if the count goes to 0. The 2725 * anyone in xfs_iwait_unpin() if the count goes to 0. The
2726 * inode must have been previoulsy pinned with a call to xfs_ipin(). 2726 * inode must have been previously pinned with a call to xfs_ipin().
2727 */ 2727 */
2728void 2728void
2729xfs_iunpin( 2729xfs_iunpin(
@@ -3690,7 +3690,7 @@ void
3690xfs_iext_add( 3690xfs_iext_add(
3691 xfs_ifork_t *ifp, /* inode fork pointer */ 3691 xfs_ifork_t *ifp, /* inode fork pointer */
3692 xfs_extnum_t idx, /* index to begin adding exts */ 3692 xfs_extnum_t idx, /* index to begin adding exts */
3693 int ext_diff) /* nubmer of extents to add */ 3693 int ext_diff) /* number of extents to add */
3694{ 3694{
3695 int byte_diff; /* new bytes being added */ 3695 int byte_diff; /* new bytes being added */
3696 int new_size; /* size of extents after adding */ 3696 int new_size; /* size of extents after adding */
@@ -4038,7 +4038,7 @@ xfs_iext_remove_indirect(
4038 xfs_extnum_t ext_diff; /* extents to remove in current list */ 4038 xfs_extnum_t ext_diff; /* extents to remove in current list */
4039 xfs_extnum_t nex1; /* number of extents before idx */ 4039 xfs_extnum_t nex1; /* number of extents before idx */
4040 xfs_extnum_t nex2; /* extents after idx + count */ 4040 xfs_extnum_t nex2; /* extents after idx + count */
4041 int nlists; /* entries in indirecton array */ 4041 int nlists; /* entries in indirection array */
4042 int page_idx = idx; /* index in target extent list */ 4042 int page_idx = idx; /* index in target extent list */
4043 4043
4044 ASSERT(ifp->if_flags & XFS_IFEXTIREC); 4044 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
@@ -4291,9 +4291,9 @@ xfs_iext_bno_to_ext(
4291 xfs_filblks_t blockcount = 0; /* number of blocks in extent */ 4291 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
4292 xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */ 4292 xfs_bmbt_rec_t *ep = NULL; /* pointer to target extent */
4293 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */ 4293 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4294 int high; /* upper boundry in search */ 4294 int high; /* upper boundary in search */
4295 xfs_extnum_t idx = 0; /* index of target extent */ 4295 xfs_extnum_t idx = 0; /* index of target extent */
4296 int low; /* lower boundry in search */ 4296 int low; /* lower boundary in search */
4297 xfs_extnum_t nextents; /* number of file extents */ 4297 xfs_extnum_t nextents; /* number of file extents */
4298 xfs_fileoff_t startoff = 0; /* start offset of extent */ 4298 xfs_fileoff_t startoff = 0; /* start offset of extent */
4299 4299
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 36aa1fcb90a5..7497a481b2f5 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -580,7 +580,7 @@ xfs_inode_item_unpin_remove(
580 * been or is in the process of being flushed, then (ideally) we'd like to 580 * been or is in the process of being flushed, then (ideally) we'd like to
581 * see if the inode's buffer is still incore, and if so give it a nudge. 581 * see if the inode's buffer is still incore, and if so give it a nudge.
582 * We delay doing so until the pushbuf routine, though, to avoid holding 582 * We delay doing so until the pushbuf routine, though, to avoid holding
583 * the AIL lock across a call to the blackhole which is the buffercache. 583 * the AIL lock across a call to the blackhole which is the buffer cache.
584 * Also we don't want to sleep in any device strategy routines, which can happen 584 * Also we don't want to sleep in any device strategy routines, which can happen
585 * if we do the subsequent bawrite in here. 585 * if we do the subsequent bawrite in here.
586 */ 586 */
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 32247b6bfee7..94068d014f27 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -272,7 +272,7 @@ xfs_bulkstat(
272 size_t statstruct_size, /* sizeof struct filling */ 272 size_t statstruct_size, /* sizeof struct filling */
273 char __user *ubuffer, /* buffer with inode stats */ 273 char __user *ubuffer, /* buffer with inode stats */
274 int flags, /* defined in xfs_itable.h */ 274 int flags, /* defined in xfs_itable.h */
275 int *done) /* 1 if there're more stats to get */ 275 int *done) /* 1 if there are more stats to get */
276{ 276{
277 xfs_agblock_t agbno=0;/* allocation group block number */ 277 xfs_agblock_t agbno=0;/* allocation group block number */
278 xfs_buf_t *agbp; /* agi header buffer */ 278 xfs_buf_t *agbp; /* agi header buffer */
@@ -676,7 +676,7 @@ xfs_bulkstat_single(
676 xfs_mount_t *mp, /* mount point for filesystem */ 676 xfs_mount_t *mp, /* mount point for filesystem */
677 xfs_ino_t *lastinop, /* inode to return */ 677 xfs_ino_t *lastinop, /* inode to return */
678 char __user *buffer, /* buffer with inode stats */ 678 char __user *buffer, /* buffer with inode stats */
679 int *done) /* 1 if there're more stats to get */ 679 int *done) /* 1 if there are more stats to get */
680{ 680{
681 int count; /* count value for bulkstat call */ 681 int count; /* count value for bulkstat call */
682 int error; /* return value */ 682 int error; /* return value */
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index 047d834ed210..11eb4e1b18c4 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -60,7 +60,7 @@ xfs_bulkstat(
60 size_t statstruct_size,/* sizeof struct that we're filling */ 60 size_t statstruct_size,/* sizeof struct that we're filling */
61 char __user *ubuffer,/* buffer with inode stats */ 61 char __user *ubuffer,/* buffer with inode stats */
62 int flags, /* flag to control access method */ 62 int flags, /* flag to control access method */
63 int *done); /* 1 if there're more stats to get */ 63 int *done); /* 1 if there are more stats to get */
64 64
65int 65int
66xfs_bulkstat_single( 66xfs_bulkstat_single(
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 9176995160ed..32e841d2f26d 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -59,7 +59,7 @@ STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
59 int num_bblks); 59 int num_bblks);
60STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes); 60STATIC int xlog_space_left(xlog_t *log, int cycle, int bytes);
61STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); 61STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
62STATIC void xlog_unalloc_log(xlog_t *log); 62STATIC void xlog_dealloc_log(xlog_t *log);
63STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[], 63STATIC int xlog_write(xfs_mount_t *mp, xfs_log_iovec_t region[],
64 int nentries, xfs_log_ticket_t tic, 64 int nentries, xfs_log_ticket_t tic,
65 xfs_lsn_t *start_lsn, 65 xfs_lsn_t *start_lsn,
@@ -304,7 +304,7 @@ xfs_log_done(xfs_mount_t *mp,
304 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 || 304 if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
305 (flags & XFS_LOG_REL_PERM_RESERV)) { 305 (flags & XFS_LOG_REL_PERM_RESERV)) {
306 /* 306 /*
307 * Release ticket if not permanent reservation or a specifc 307 * Release ticket if not permanent reservation or a specific
308 * request has been made to release a permanent reservation. 308 * request has been made to release a permanent reservation.
309 */ 309 */
310 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)"); 310 xlog_trace_loggrant(log, ticket, "xfs_log_done: (non-permanent)");
@@ -511,7 +511,7 @@ xfs_log_mount(xfs_mount_t *mp,
511 vfsp->vfs_flag |= VFS_RDONLY; 511 vfsp->vfs_flag |= VFS_RDONLY;
512 if (error) { 512 if (error) {
513 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error); 513 cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
514 xlog_unalloc_log(mp->m_log); 514 xlog_dealloc_log(mp->m_log);
515 return error; 515 return error;
516 } 516 }
517 } 517 }
@@ -667,7 +667,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
667 * 667 *
668 * Go through the motions of sync'ing and releasing 668 * Go through the motions of sync'ing and releasing
669 * the iclog, even though no I/O will actually happen, 669 * the iclog, even though no I/O will actually happen,
670 * we need to wait for other log I/O's that may already 670 * we need to wait for other log I/Os that may already
671 * be in progress. Do this as a separate section of 671 * be in progress. Do this as a separate section of
672 * code so we'll know if we ever get stuck here that 672 * code so we'll know if we ever get stuck here that
673 * we're in this odd situation of trying to unmount 673 * we're in this odd situation of trying to unmount
@@ -704,7 +704,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
704void 704void
705xfs_log_unmount_dealloc(xfs_mount_t *mp) 705xfs_log_unmount_dealloc(xfs_mount_t *mp)
706{ 706{
707 xlog_unalloc_log(mp->m_log); 707 xlog_dealloc_log(mp->m_log);
708} 708}
709 709
710/* 710/*
@@ -1492,7 +1492,7 @@ xlog_sync(xlog_t *log,
1492 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1492 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1493 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1493 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
1494 1494
1495 /* account for internal log which does't start at block #0 */ 1495 /* account for internal log which doesn't start at block #0 */
1496 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart); 1496 XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
1497 XFS_BUF_WRITE(bp); 1497 XFS_BUF_WRITE(bp);
1498 if ((error = XFS_bwrite(bp))) { 1498 if ((error = XFS_bwrite(bp))) {
@@ -1506,10 +1506,10 @@ xlog_sync(xlog_t *log,
1506 1506
1507 1507
1508/* 1508/*
1509 * Unallocate a log structure 1509 * Deallocate a log structure
1510 */ 1510 */
1511void 1511void
1512xlog_unalloc_log(xlog_t *log) 1512xlog_dealloc_log(xlog_t *log)
1513{ 1513{
1514 xlog_in_core_t *iclog, *next_iclog; 1514 xlog_in_core_t *iclog, *next_iclog;
1515 xlog_ticket_t *tic, *next_tic; 1515 xlog_ticket_t *tic, *next_tic;
@@ -1539,7 +1539,7 @@ xlog_unalloc_log(xlog_t *log)
1539 if ((log->l_ticket_cnt != log->l_ticket_tcnt) && 1539 if ((log->l_ticket_cnt != log->l_ticket_tcnt) &&
1540 !XLOG_FORCED_SHUTDOWN(log)) { 1540 !XLOG_FORCED_SHUTDOWN(log)) {
1541 xfs_fs_cmn_err(CE_WARN, log->l_mp, 1541 xfs_fs_cmn_err(CE_WARN, log->l_mp,
1542 "xlog_unalloc_log: (cnt: %d, total: %d)", 1542 "xlog_dealloc_log: (cnt: %d, total: %d)",
1543 log->l_ticket_cnt, log->l_ticket_tcnt); 1543 log->l_ticket_cnt, log->l_ticket_tcnt);
1544 /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */ 1544 /* ASSERT(log->l_ticket_cnt == log->l_ticket_tcnt); */
1545 1545
@@ -1562,7 +1562,7 @@ xlog_unalloc_log(xlog_t *log)
1562#endif 1562#endif
1563 log->l_mp->m_log = NULL; 1563 log->l_mp->m_log = NULL;
1564 kmem_free(log, sizeof(xlog_t)); 1564 kmem_free(log, sizeof(xlog_t));
1565} /* xlog_unalloc_log */ 1565} /* xlog_dealloc_log */
1566 1566
1567/* 1567/*
1568 * Update counters atomically now that memcpy is done. 1568 * Update counters atomically now that memcpy is done.
@@ -2829,7 +2829,7 @@ xlog_state_release_iclog(xlog_t *log,
2829 2829
2830 /* 2830 /*
2831 * We let the log lock go, so it's possible that we hit a log I/O 2831 * We let the log lock go, so it's possible that we hit a log I/O
2832 * error or someother SHUTDOWN condition that marks the iclog 2832 * error or some other SHUTDOWN condition that marks the iclog
2833 * as XLOG_STATE_IOERROR before the bwrite. However, we know that 2833 * as XLOG_STATE_IOERROR before the bwrite. However, we know that
2834 * this iclog has consistent data, so we ignore IOERROR 2834 * this iclog has consistent data, so we ignore IOERROR
2835 * flags after this point. 2835 * flags after this point.
diff --git a/fs/xfs/xfs_log.h b/fs/xfs/xfs_log.h
index 4b2ac88dbb83..eacb3d4987f2 100644
--- a/fs/xfs/xfs_log.h
+++ b/fs/xfs/xfs_log.h
@@ -27,7 +27,7 @@
27 27
28#ifdef __KERNEL__ 28#ifdef __KERNEL__
29/* 29/*
30 * By comparing each compnent, we don't have to worry about extra 30 * By comparing each component, we don't have to worry about extra
31 * endian issues in treating two 32 bit numbers as one 64 bit number 31 * endian issues in treating two 32 bit numbers as one 64 bit number
32 */ 32 */
33static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2) 33static inline xfs_lsn_t _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
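The component-wise comparison motivated by the comment above can be illustrated standalone. A minimal sketch, assuming an LSN that packs a 32-bit cycle count in the high word and a 32-bit block number in the low word (the function name and layout here are illustrative, not the elided _lsn_cmp body):

#include <stdint.h>

/* Compare two log sequence numbers field by field. */
static inline int lsn_cmp_sketch(uint64_t lsn1, uint64_t lsn2)
{
	uint32_t cycle1 = (uint32_t)(lsn1 >> 32), cycle2 = (uint32_t)(lsn2 >> 32);
	uint32_t block1 = (uint32_t)lsn1, block2 = (uint32_t)lsn2;

	if (cycle1 != cycle2)		/* a newer cycle decides outright */
		return cycle1 < cycle2 ? -1 : 1;
	if (block1 != block2)		/* same cycle: compare block numbers */
		return block1 < block2 ? -1 : 1;
	return 0;
}

Because each 32-bit half is compared as its own integer, the result does not depend on how the two words would be ordered inside a single 64-bit load.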
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index add13f507ed2..1f0016b0b4ec 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -583,7 +583,7 @@ xlog_find_head(
583 * x | x ... | x - 1 | x 583 * x | x ... | x - 1 | x
584 * Another case that fits this picture would be 584 * Another case that fits this picture would be
585 * x | x + 1 | x ... | x 585 * x | x + 1 | x ... | x
586 * In this case the head really is somwhere at the end of the 586 * In this case the head really is somewhere at the end of the
587 * log, as one of the latest writes at the beginning was 587 * log, as one of the latest writes at the beginning was
588 * incomplete. 588 * incomplete.
589 * One more case is 589 * One more case is
@@ -2799,7 +2799,7 @@ xlog_recover_do_trans(
2799 * we don't need to worry about the block number being 2799 * we don't need to worry about the block number being
2800 * truncated in > 1 TB buffers because in user-land, 2800 * truncated in > 1 TB buffers because in user-land,
2801 * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so 2801 * we're now n32 or 64-bit so xfs_daddr_t is 64-bits so
2802 * the blkno's will get through the user-mode buffer 2802 * the blknos will get through the user-mode buffer
2803 * cache properly. The only bad case is o32 kernels 2803 * cache properly. The only bad case is o32 kernels
2804 * where xfs_daddr_t is 32-bits but mount will warn us 2804 * where xfs_daddr_t is 32-bits but mount will warn us
2805 * off a > 1 TB filesystem before we get here. 2805 * off a > 1 TB filesystem before we get here.
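The cycle-number pictures in the xlog_find_head() hunk above reduce to scanning for the point where a newer cycle gives way to an older one. A hedged sketch of that idea (linear scan for clarity; the function and its flat-array input are hypothetical, and the real code must also handle the wrap and partial-write cases the comment enumerates):

/* Return the block index where the head of the log sits: the first
 * block whose cycle number is lower than its predecessor's. */
static int find_head_sketch(const unsigned int *cycle, int nblocks)
{
	int i;

	for (i = 1; i < nblocks; i++)
		if (cycle[i] < cycle[i - 1])
			return i;	/* writes wrapped just before here */
	return 0;			/* uniform cycles: head at block 0 */
}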
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 20e8abc16d18..72e7e78bfff8 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -393,7 +393,7 @@ xfs_initialize_perag(
393 break; 393 break;
394 } 394 }
395 395
396 /* This ag is prefered for inodes */ 396 /* This ag is preferred for inodes */
397 pag = &mp->m_perag[index]; 397 pag = &mp->m_perag[index];
398 pag->pagi_inodeok = 1; 398 pag->pagi_inodeok = 1;
399 if (index < max_metadata) 399 if (index < max_metadata)
@@ -1728,7 +1728,7 @@ xfs_mount_log_sbunit(
1728 * We cannot use the hotcpu_register() function because it does 1728 * We cannot use the hotcpu_register() function because it does
1729 * not allow notifier instances. We need a notifier per filesystem 1729 * not allow notifier instances. We need a notifier per filesystem
1730 * as we need to be able to identify the filesystem to balance 1730 * as we need to be able to identify the filesystem to balance
1731 * the counters out. This is acheived by having a notifier block 1731 * the counters out. This is achieved by having a notifier block
1732 * embedded in the xfs_mount_t and doing pointer magic to get the 1732 * embedded in the xfs_mount_t and doing pointer magic to get the
1733 * mount pointer from the notifier block address. 1733 * mount pointer from the notifier block address.
1734 */ 1734 */
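The pointer magic described in the xfs_mount.c comment above is the usual container_of() idiom: the notifier block lives inside the xfs_mount_t, so its address recovers the enclosing mount. A sketch under that assumption (the callback and field names are illustrative):

/* Hypothetical per-cpu counter rebalance callback. */
static int xfs_icsb_notify_sketch(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	xfs_mount_t *mp = container_of(nfb, xfs_mount_t, m_icsb_notifier);

	/* ... rebalance mp's per-cpu superblock counters here ... */
	return NOTIFY_OK;
}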
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index ebd73960e9db..66cbee79864e 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -379,7 +379,7 @@ typedef struct xfs_mount {
379#endif 379#endif
380 int m_dalign; /* stripe unit */ 380 int m_dalign; /* stripe unit */
381 int m_swidth; /* stripe width */ 381 int m_swidth; /* stripe width */
382 int m_sinoalign; /* stripe unit inode alignmnt */ 382 int m_sinoalign; /* stripe unit inode alignment */
383 int m_attr_magicpct;/* 37% of the blocksize */ 383 int m_attr_magicpct;/* 37% of the blocksize */
384 int m_dir_magicpct; /* 37% of the dir blocksize */ 384 int m_dir_magicpct; /* 37% of the dir blocksize */
385 __uint8_t m_mk_sharedro; /* mark shared ro on unmount */ 385 __uint8_t m_mk_sharedro; /* mark shared ro on unmount */
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index 82a08baf437b..4f6a034de7f7 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -31,7 +31,7 @@
31typedef __uint32_t xfs_dqid_t; 31typedef __uint32_t xfs_dqid_t;
32 32
33/* 33/*
34 * Eventhough users may not have quota limits occupying all 64-bits, 34 * Even though users may not have quota limits occupying all 64-bits,
35 * they may need 64-bit accounting. Hence, 64-bit quota-counters, 35 * they may need 64-bit accounting. Hence, 64-bit quota-counters,
36 * and quota-limits. This is a waste in the common case, but hey ... 36 * and quota-limits. This is a waste in the common case, but hey ...
37 */ 37 */
@@ -246,7 +246,7 @@ typedef struct xfs_qoff_logformat {
246#ifdef __KERNEL__ 246#ifdef __KERNEL__
247/* 247/*
248 * This check is done typically without holding the inode lock; 248 * This check is done typically without holding the inode lock;
249 * that may seem racey, but it is harmless in the context that it is used. 249 * that may seem racy, but it is harmless in the context that it is used.
250 * The inode cannot go inactive as long as a reference is kept, and 250 * The inode cannot go inactive as long as a reference is kept, and
251 * therefore if dquot(s) were attached, they'll stay consistent. 251 * therefore if dquot(s) were attached, they'll stay consistent.
252 * If, for example, the ownership of the inode changes while 252 * If, for example, the ownership of the inode changes while
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 2918956553a5..8d056cef5d1f 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -490,7 +490,7 @@ xfs_trans_mod_sb(
490 case XFS_TRANS_SB_RES_FREXTENTS: 490 case XFS_TRANS_SB_RES_FREXTENTS:
491 /* 491 /*
492 * The allocation has already been applied to the 492 * The allocation has already been applied to the
493 * in-core superblocks's counter. This should only 493 * in-core superblock's counter. This should only
494 * be applied to the on-disk superblock. 494 * be applied to the on-disk superblock.
495 */ 495 */
496 ASSERT(delta < 0); 496 ASSERT(delta < 0);
@@ -611,7 +611,7 @@ xfs_trans_apply_sb_deltas(
611 611
612 if (whole) 612 if (whole)
613 /* 613 /*
614 * Log the whole thing, the fields are discontiguous. 614 * Log the whole thing, the fields are noncontiguous.
615 */ 615 */
616 xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1); 616 xfs_trans_log_buf(tp, bp, 0, sizeof(xfs_sb_t) - 1);
617 else 617 else
@@ -669,7 +669,7 @@ xfs_trans_unreserve_and_mod_sb(
669 /* 669 /*
670 * Apply any superblock modifications to the in-core version. 670 * Apply any superblock modifications to the in-core version.
671 * The t_res_fdblocks_delta and t_res_frextents_delta fields are 671 * The t_res_fdblocks_delta and t_res_frextents_delta fields are
672 * explicity NOT applied to the in-core superblock. 672 * explicitly NOT applied to the in-core superblock.
673 * The idea is that that has already been done. 673 * The idea is that that has already been done.
674 */ 674 */
675 if (tp->t_flags & XFS_TRANS_SB_DIRTY) { 675 if (tp->t_flags & XFS_TRANS_SB_DIRTY) {
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index e48befa4e337..100d9a4b38ee 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -354,7 +354,7 @@ typedef struct xfs_trans {
354 xfs_lsn_t t_commit_lsn; /* log seq num of end of 354 xfs_lsn_t t_commit_lsn; /* log seq num of end of
355 * transaction. */ 355 * transaction. */
356 struct xfs_mount *t_mountp; /* ptr to fs mount struct */ 356 struct xfs_mount *t_mountp; /* ptr to fs mount struct */
357 struct xfs_dquot_acct *t_dqinfo; /* accting info for dquots */ 357 struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */
358 xfs_trans_callback_t t_callback; /* transaction callback */ 358 xfs_trans_callback_t t_callback; /* transaction callback */
359 void *t_callarg; /* callback arg */ 359 void *t_callarg; /* callback arg */
360 unsigned int t_flags; /* misc flags */ 360 unsigned int t_flags; /* misc flags */
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index e341409172d2..7c5894d59f81 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -272,7 +272,7 @@ xfs_trans_log_inode(
272 * This is to coordinate with the xfs_iflush() and xfs_iflush_done() 272 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
273 * routines in the eventual clearing of the ilf_fields bits. 273 * routines in the eventual clearing of the ilf_fields bits.
274 * See the big comment in xfs_iflush() for an explanation of 274 * See the big comment in xfs_iflush() for an explanation of
275 * this coorination mechanism. 275 * this coordination mechanism.
276 */ 276 */
277 flags |= ip->i_itemp->ili_last_fields; 277 flags |= ip->i_itemp->ili_last_fields;
278 ip->i_itemp->ili_format.ilf_fields |= flags; 278 ip->i_itemp->ili_format.ilf_fields |= flags;
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index d4ec4dfaf19c..504d2a80747a 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -880,10 +880,10 @@ xfs_statvfs(
880 * determine if they should be flushed sync, async, or 880 * determine if they should be flushed sync, async, or
881 * delwri. 881 * delwri.
882 * SYNC_CLOSE - This flag is passed when the system is being 882 * SYNC_CLOSE - This flag is passed when the system is being
883 * unmounted. We should sync and invalidate everthing. 883 * unmounted. We should sync and invalidate everything.
884 * SYNC_FSDATA - This indicates that the caller would like to make 884 * SYNC_FSDATA - This indicates that the caller would like to make
885 * sure the superblock is safe on disk. We can ensure 885 * sure the superblock is safe on disk. We can ensure
886 * this by simply makeing sure the log gets flushed 886 * this by simply making sure the log gets flushed
887 * if SYNC_BDFLUSH is set, and by actually writing it 887 * if SYNC_BDFLUSH is set, and by actually writing it
888 * out otherwise. 888 * out otherwise.
889 * 889 *
@@ -908,7 +908,7 @@ xfs_sync(
908 * 908 *
909 * This routine supports all of the flags defined for the generic VFS_SYNC 909 * This routine supports all of the flags defined for the generic VFS_SYNC
910 * interface as explained above under xfs_sync. In the interests of not 910 * interface as explained above under xfs_sync. In the interests of not
911 * changing interfaces within the 6.5 family, additional internallly- 911 * changing interfaces within the 6.5 family, additional internally-
912 * required functions are specified within a separate xflags parameter, 912 * required functions are specified within a separate xflags parameter,
913 * only available by calling this routine. 913 * only available by calling this routine.
914 * 914 *
@@ -1090,7 +1090,7 @@ xfs_sync_inodes(
1090 * If this is just vfs_sync() or pflushd() calling 1090 * If this is just vfs_sync() or pflushd() calling
1091 * then we can skip inodes for which it looks like 1091 * then we can skip inodes for which it looks like
1092 * there is nothing to do. Since we don't have the 1092 * there is nothing to do. Since we don't have the
1093 * inode locked this is racey, but these are periodic 1093 * inode locked this is racy, but these are periodic
1094 * calls so it doesn't matter. For the others we want 1094 * calls so it doesn't matter. For the others we want
1095 * to know for sure, so we at least try to lock them. 1095 * to know for sure, so we at least try to lock them.
1096 */ 1096 */
@@ -1429,7 +1429,7 @@ xfs_sync_inodes(
1429 * 1429 *
1430 * This routine supports all of the flags defined for the generic VFS_SYNC 1430 * This routine supports all of the flags defined for the generic VFS_SYNC
1431 * interface as explained above under xfs_sync. In the interests of not 1431 * interface as explained above under xfs_sync. In the interests of not
1432 * changing interfaces within the 6.5 family, additional internallly- 1432 * changing interfaces within the 6.5 family, additional internally-
1433 * required functions are specified within a separate xflags parameter, 1433 * required functions are specified within a separate xflags parameter,
1434 * only available by calling this routine. 1434 * only available by calling this routine.
1435 * 1435 *
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 0f0a64e81db9..de49601919c1 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -848,7 +848,7 @@ xfs_setattr(
848 * If this is a synchronous mount, make sure that the 848 * If this is a synchronous mount, make sure that the
849 * transaction goes to disk before returning to the user. 849 * transaction goes to disk before returning to the user.
850 * This is slightly sub-optimal in that truncates require 850 * This is slightly sub-optimal in that truncates require
851 * two sync transactions instead of one for wsync filesytems. 851 * two sync transactions instead of one for wsync filesystems.
852 * One for the truncate and one for the timestamps since we 852 * One for the truncate and one for the timestamps since we
853 * don't want to change the timestamps unless we're sure the 853 * don't want to change the timestamps unless we're sure the
854 * truncate worked. Truncates are less than 1% of the laddis 854 * truncate worked. Truncates are less than 1% of the laddis
@@ -1170,7 +1170,7 @@ xfs_fsync(
1170 1170
1171 /* 1171 /*
1172 * If this inode is on the RT dev we need to flush that 1172 * If this inode is on the RT dev we need to flush that
1173 * cache aswell. 1173 * cache as well.
1174 */ 1174 */
1175 if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) 1175 if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
1176 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp); 1176 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
@@ -1380,7 +1380,7 @@ xfs_inactive_symlink_rmt(
1380 */ 1380 */
1381 ntp = xfs_trans_dup(tp); 1381 ntp = xfs_trans_dup(tp);
1382 /* 1382 /*
1383 * Commit the transaction containing extent freeing and EFD's. 1383 * Commit the transaction containing extent freeing and EFDs.
1384 * If we get an error on the commit here or on the reserve below, 1384 * If we get an error on the commit here or on the reserve below,
1385 * we need to unlock the inode since the new transaction doesn't 1385 * we need to unlock the inode since the new transaction doesn't
1386 * have the inode attached. 1386 * have the inode attached.
@@ -2023,7 +2023,7 @@ xfs_create(
2023 XFS_QM_DQRELE(mp, gdqp); 2023 XFS_QM_DQRELE(mp, gdqp);
2024 2024
2025 /* 2025 /*
2026 * Propogate the fact that the vnode changed after the 2026 * Propagate the fact that the vnode changed after the
2027 * xfs_inode locks have been released. 2027 * xfs_inode locks have been released.
2028 */ 2028 */
2029 VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3); 2029 VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
@@ -2370,7 +2370,7 @@ xfs_remove(
2370 * for a log reservation. Since we'll have to wait for the 2370 * for a log reservation. Since we'll have to wait for the
2371 * inactive code to complete before returning from xfs_iget, 2371 * inactive code to complete before returning from xfs_iget,
2372 * we need to make sure that we don't have log space reserved 2372 * we need to make sure that we don't have log space reserved
2373 * when we call xfs_iget. Instead we get an unlocked referece 2373 * when we call xfs_iget. Instead we get an unlocked reference
2374 * to the inode before getting our log reservation. 2374 * to the inode before getting our log reservation.
2375 */ 2375 */
2376 error = xfs_get_dir_entry(dentry, &ip); 2376 error = xfs_get_dir_entry(dentry, &ip);
@@ -3020,7 +3020,7 @@ xfs_rmdir(
3020 * for a log reservation. Since we'll have to wait for the 3020 * for a log reservation. Since we'll have to wait for the
3021 * inactive code to complete before returning from xfs_iget, 3021 * inactive code to complete before returning from xfs_iget,
3022 * we need to make sure that we don't have log space reserved 3022 * we need to make sure that we don't have log space reserved
3023 * when we call xfs_iget. Instead we get an unlocked referece 3023 * when we call xfs_iget. Instead we get an unlocked reference
3024 * to the inode before getting our log reservation. 3024 * to the inode before getting our log reservation.
3025 */ 3025 */
3026 error = xfs_get_dir_entry(dentry, &cdp); 3026 error = xfs_get_dir_entry(dentry, &cdp);
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 92146f3b7423..41ecbb847f32 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -62,6 +62,8 @@
62 .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \ 62 .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
63 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \ 63 .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
64 .rlim = INIT_RLIMITS, \ 64 .rlim = INIT_RLIMITS, \
65 .pgrp = 1, \
66 .session = 1, \
65} 67}
66 68
67#define INIT_SIGHAND(sighand) { \ 69#define INIT_SIGHAND(sighand) { \
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 5b2fcb19d2da..5b9082cc600f 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -4,7 +4,6 @@
4enum pid_type 4enum pid_type
5{ 5{
6 PIDTYPE_PID, 6 PIDTYPE_PID,
7 PIDTYPE_TGID,
8 PIDTYPE_PGID, 7 PIDTYPE_PGID,
9 PIDTYPE_SID, 8 PIDTYPE_SID,
10 PIDTYPE_MAX 9 PIDTYPE_MAX
@@ -38,7 +37,6 @@ extern struct pid *FASTCALL(find_pid(enum pid_type, int));
38 37
39extern int alloc_pidmap(void); 38extern int alloc_pidmap(void);
40extern void FASTCALL(free_pidmap(int)); 39extern void FASTCALL(free_pidmap(int));
41extern void switch_exec_pids(struct task_struct *leader, struct task_struct *thread);
42 40
43#define do_each_task_pid(who, type, task) \ 41#define do_each_task_pid(who, type, task) \
44 if ((task = find_task_by_pid_type(type, who))) { \ 42 if ((task = find_task_by_pid_type(type, who))) { \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 20b4f0372e44..d04186d8cc68 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -355,16 +355,8 @@ struct sighand_struct {
355 atomic_t count; 355 atomic_t count;
356 struct k_sigaction action[_NSIG]; 356 struct k_sigaction action[_NSIG];
357 spinlock_t siglock; 357 spinlock_t siglock;
358 struct rcu_head rcu;
359}; 358};
360 359
361extern void sighand_free_cb(struct rcu_head *rhp);
362
363static inline void sighand_free(struct sighand_struct *sp)
364{
365 call_rcu(&sp->rcu, sighand_free_cb);
366}
367
368/* 360/*
369 * NOTE! "signal_struct" does not have its own 361 * NOTE! "signal_struct" does not have its own
370 * locking, because a shared signal_struct always 362 * locking, because a shared signal_struct always
@@ -760,6 +752,7 @@ struct task_struct {
760 752
761 /* PID/PID hash table linkage. */ 753 /* PID/PID hash table linkage. */
762 struct pid pids[PIDTYPE_MAX]; 754 struct pid pids[PIDTYPE_MAX];
755 struct list_head thread_group;
763 756
764 struct completion *vfork_done; /* for vfork() */ 757 struct completion *vfork_done; /* for vfork() */
765 int __user *set_child_tid; /* CLONE_CHILD_SETTID */ 758 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
@@ -1101,7 +1094,6 @@ extern void force_sig_specific(int, struct task_struct *);
1101extern int send_sig(int, struct task_struct *, int); 1094extern int send_sig(int, struct task_struct *, int);
1102extern void zap_other_threads(struct task_struct *p); 1095extern void zap_other_threads(struct task_struct *p);
1103extern int kill_pg(pid_t, int, int); 1096extern int kill_pg(pid_t, int, int);
1104extern int kill_sl(pid_t, int, int);
1105extern int kill_proc(pid_t, int, int); 1097extern int kill_proc(pid_t, int, int);
1106extern struct sigqueue *sigqueue_alloc(void); 1098extern struct sigqueue *sigqueue_alloc(void);
1107extern void sigqueue_free(struct sigqueue *); 1099extern void sigqueue_free(struct sigqueue *);
@@ -1158,10 +1150,8 @@ extern void flush_thread(void);
1158extern void exit_thread(void); 1150extern void exit_thread(void);
1159 1151
1160extern void exit_files(struct task_struct *); 1152extern void exit_files(struct task_struct *);
1161extern void exit_signal(struct task_struct *); 1153extern void __cleanup_signal(struct signal_struct *);
1162extern void __exit_signal(struct task_struct *); 1154extern void __cleanup_sighand(struct sighand_struct *);
1163extern void exit_sighand(struct task_struct *);
1164extern void __exit_sighand(struct task_struct *);
1165extern void exit_itimers(struct signal_struct *); 1155extern void exit_itimers(struct signal_struct *);
1166 1156
1167extern NORET_TYPE void do_group_exit(int); 1157extern NORET_TYPE void do_group_exit(int);
@@ -1185,19 +1175,7 @@ extern void wait_task_inactive(task_t * p);
1185#endif 1175#endif
1186 1176
1187#define remove_parent(p) list_del_init(&(p)->sibling) 1177#define remove_parent(p) list_del_init(&(p)->sibling)
1188#define add_parent(p, parent) list_add_tail(&(p)->sibling,&(parent)->children) 1178#define add_parent(p) list_add_tail(&(p)->sibling,&(p)->parent->children)
1189
1190#define REMOVE_LINKS(p) do { \
1191 if (thread_group_leader(p)) \
1192 list_del_init(&(p)->tasks); \
1193 remove_parent(p); \
1194 } while (0)
1195
1196#define SET_LINKS(p) do { \
1197 if (thread_group_leader(p)) \
1198 list_add_tail(&(p)->tasks,&init_task.tasks); \
1199 add_parent(p, (p)->parent); \
1200 } while (0)
1201 1179
1202#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks) 1180#define next_task(p) list_entry((p)->tasks.next, struct task_struct, tasks)
1203#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks) 1181#define prev_task(p) list_entry((p)->tasks.prev, struct task_struct, tasks)
@@ -1215,20 +1193,22 @@ extern void wait_task_inactive(task_t * p);
1215#define while_each_thread(g, t) \ 1193#define while_each_thread(g, t) \
1216 while ((t = next_thread(t)) != g) 1194 while ((t = next_thread(t)) != g)
1217 1195
1218extern task_t * FASTCALL(next_thread(const task_t *p));
1219
1220#define thread_group_leader(p) (p->pid == p->tgid) 1196#define thread_group_leader(p) (p->pid == p->tgid)
1221 1197
1198static inline task_t *next_thread(task_t *p)
1199{
1200 return list_entry(rcu_dereference(p->thread_group.next),
1201 task_t, thread_group);
1202}
1203
1222static inline int thread_group_empty(task_t *p) 1204static inline int thread_group_empty(task_t *p)
1223{ 1205{
1224 return list_empty(&p->pids[PIDTYPE_TGID].pid_list); 1206 return list_empty(&p->thread_group);
1225} 1207}
1226 1208
1227#define delay_group_leader(p) \ 1209#define delay_group_leader(p) \
1228 (thread_group_leader(p) && !thread_group_empty(p)) 1210 (thread_group_leader(p) && !thread_group_empty(p))
1229 1211
1230extern void unhash_process(struct task_struct *p);
1231
1232/* 1212/*
1233 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring 1213 * Protects ->fs, ->files, ->mm, ->ptrace, ->group_info, ->comm, keyring
1234 * subscriptions and synchronises with wait4(). Also used in procfs. Also 1214 * subscriptions and synchronises with wait4(). Also used in procfs. Also
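With PIDTYPE_TGID gone, walking a thread group now follows the circular task_struct->thread_group list through the new inline next_thread() above, mirroring the while_each_thread() macro. A usage sketch (the walker function itself is hypothetical):

/* Visit every thread in g's group; the list is circular, so the loop
 * ends when next_thread() comes back around to the starting task. */
static void walk_thread_group_sketch(task_t *g)
{
	task_t *t = g;

	rcu_read_lock();	/* next_thread() uses rcu_dereference() */
	do {
		/* ... inspect t ... */
	} while ((t = next_thread(t)) != g);
	rcu_read_unlock();
}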
@@ -1248,6 +1228,15 @@ static inline void task_unlock(struct task_struct *p)
1248 spin_unlock(&p->alloc_lock); 1228 spin_unlock(&p->alloc_lock);
1249} 1229}
1250 1230
1231extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
1232 unsigned long *flags);
1233
1234static inline void unlock_task_sighand(struct task_struct *tsk,
1235 unsigned long *flags)
1236{
1237 spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
1238}
1239
1251#ifndef __HAVE_THREAD_FUNCTIONS 1240#ifndef __HAVE_THREAD_FUNCTIONS
1252 1241
1253#define task_thread_info(task) (task)->thread_info 1242#define task_thread_info(task) (task)->thread_info
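The new lock_task_sighand()/unlock_task_sighand() pair pins a task's sighand_struct against a concurrent exit. A sketch of the intended calling pattern (the probing function is hypothetical, and the NULL return for an already-exited task is an assumption read off the declarations above):

static int probe_task_sketch(struct task_struct *tsk)
{
	struct sighand_struct *sighand;
	unsigned long flags;

	sighand = lock_task_sighand(tsk, &flags);
	if (!sighand)
		return -ESRCH;	/* assumed: __exit_signal() already ran */

	/* tsk->sighand and tsk->signal are stable under the siglock */

	unlock_task_sighand(tsk, &flags);
	return 0;
}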
diff --git a/include/linux/signal.h b/include/linux/signal.h
index b7d093520bb6..162a8fd10b29 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -249,6 +249,8 @@ static inline void init_sigpending(struct sigpending *sig)
249 INIT_LIST_HEAD(&sig->list); 249 INIT_LIST_HEAD(&sig->list);
250} 250}
251 251
252extern void flush_sigqueue(struct sigpending *queue);
253
252/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ 254/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
253static inline int valid_signal(unsigned long sig) 255static inline int valid_signal(unsigned long sig)
254{ 256{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 15e1d9736b1b..3af03b19c983 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -210,7 +210,6 @@ extern kmem_cache_t *names_cachep;
210extern kmem_cache_t *files_cachep; 210extern kmem_cache_t *files_cachep;
211extern kmem_cache_t *filp_cachep; 211extern kmem_cache_t *filp_cachep;
212extern kmem_cache_t *fs_cachep; 212extern kmem_cache_t *fs_cachep;
213extern kmem_cache_t *signal_cachep;
214extern kmem_cache_t *sighand_cachep; 213extern kmem_cache_t *sighand_cachep;
215extern kmem_cache_t *bio_cachep; 214extern kmem_cache_t *bio_cachep;
216 215
diff --git a/kernel/exit.c b/kernel/exit.c
index a8c7efc7a681..bc0ec674d3f4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -29,6 +29,7 @@
29#include <linux/cpuset.h> 29#include <linux/cpuset.h>
30#include <linux/syscalls.h> 30#include <linux/syscalls.h>
31#include <linux/signal.h> 31#include <linux/signal.h>
32#include <linux/posix-timers.h>
32#include <linux/cn_proc.h> 33#include <linux/cn_proc.h>
33#include <linux/mutex.h> 34#include <linux/mutex.h>
34#include <linux/futex.h> 35#include <linux/futex.h>
@@ -50,15 +51,80 @@ static void __unhash_process(struct task_struct *p)
50{ 51{
51 nr_threads--; 52 nr_threads--;
52 detach_pid(p, PIDTYPE_PID); 53 detach_pid(p, PIDTYPE_PID);
53 detach_pid(p, PIDTYPE_TGID);
54 if (thread_group_leader(p)) { 54 if (thread_group_leader(p)) {
55 detach_pid(p, PIDTYPE_PGID); 55 detach_pid(p, PIDTYPE_PGID);
56 detach_pid(p, PIDTYPE_SID); 56 detach_pid(p, PIDTYPE_SID);
57 if (p->pid) 57
58 __get_cpu_var(process_counts)--; 58 list_del_init(&p->tasks);
59 __get_cpu_var(process_counts)--;
60 }
61 list_del_rcu(&p->thread_group);
62 remove_parent(p);
63}
64
65/*
66 * This function expects the tasklist_lock write-locked.
67 */
68static void __exit_signal(struct task_struct *tsk)
69{
70 struct signal_struct *sig = tsk->signal;
71 struct sighand_struct *sighand;
72
73 BUG_ON(!sig);
74 BUG_ON(!atomic_read(&sig->count));
75
76 rcu_read_lock();
77 sighand = rcu_dereference(tsk->sighand);
78 spin_lock(&sighand->siglock);
79
80 posix_cpu_timers_exit(tsk);
81 if (atomic_dec_and_test(&sig->count))
82 posix_cpu_timers_exit_group(tsk);
83 else {
84 /*
85 * If there is any task waiting for the group exit
86 * then notify it:
87 */
88 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
89 wake_up_process(sig->group_exit_task);
90 sig->group_exit_task = NULL;
91 }
92 if (tsk == sig->curr_target)
93 sig->curr_target = next_thread(tsk);
94 /*
95 * Accumulate here the counters for all threads but the
96 * group leader as they die, so they can be added into
97 * the process-wide totals when those are taken.
98 * The group leader stays around as a zombie as long
99 * as there are other threads. When it gets reaped,
100 * the exit.c code will add its counts into these totals.
101 * We won't ever get here for the group leader, since it
102 * will have been the last reference on the signal_struct.
103 */
104 sig->utime = cputime_add(sig->utime, tsk->utime);
105 sig->stime = cputime_add(sig->stime, tsk->stime);
106 sig->min_flt += tsk->min_flt;
107 sig->maj_flt += tsk->maj_flt;
108 sig->nvcsw += tsk->nvcsw;
109 sig->nivcsw += tsk->nivcsw;
110 sig->sched_time += tsk->sched_time;
111 sig = NULL; /* Marker for below. */
59 } 112 }
60 113
61 REMOVE_LINKS(p); 114 __unhash_process(tsk);
115
116 tsk->signal = NULL;
117 tsk->sighand = NULL;
118 spin_unlock(&sighand->siglock);
119 rcu_read_unlock();
120
121 __cleanup_sighand(sighand);
122 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
123 flush_sigqueue(&tsk->pending);
124 if (sig) {
125 flush_sigqueue(&sig->shared_pending);
126 __cleanup_signal(sig);
127 }
62} 128}
63 129
64void release_task(struct task_struct * p) 130void release_task(struct task_struct * p)
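The accumulation comment inside __exit_signal() above implies a matching consumer: process-wide totals are the reaped threads' sums kept in signal_struct plus each still-live thread's own counters. A hypothetical sketch of that summation (callers would hold the siglock plus tasklist_lock or RCU; this function is not part of the patch):

/* Total user time for p's whole thread group. */
static cputime_t group_utime_sketch(task_t *p)
{
	cputime_t utime = p->signal->utime;	/* threads already reaped */
	task_t *t = p;

	do {					/* threads still alive */
		utime = cputime_add(utime, t->utime);
	} while ((t = next_thread(t)) != p);

	return utime;
}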
@@ -67,21 +133,14 @@ void release_task(struct task_struct * p)
67 task_t *leader; 133 task_t *leader;
68 struct dentry *proc_dentry; 134 struct dentry *proc_dentry;
69 135
70repeat: 136repeat:
71 atomic_dec(&p->user->processes); 137 atomic_dec(&p->user->processes);
72 spin_lock(&p->proc_lock); 138 spin_lock(&p->proc_lock);
73 proc_dentry = proc_pid_unhash(p); 139 proc_dentry = proc_pid_unhash(p);
74 write_lock_irq(&tasklist_lock); 140 write_lock_irq(&tasklist_lock);
75 if (unlikely(p->ptrace)) 141 ptrace_unlink(p);
76 __ptrace_unlink(p);
77 BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children)); 142 BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
78 __exit_signal(p); 143 __exit_signal(p);
79 /*
80 * Note that the fastpath in sys_times depends on __exit_signal having
81 * updated the counters before a task is removed from the tasklist of
82 * the process by __unhash_process.
83 */
84 __unhash_process(p);
85 144
86 /* 145 /*
87 * If we are the last non-leader member of the thread 146 * If we are the last non-leader member of the thread
@@ -116,21 +175,6 @@ repeat:
116 goto repeat; 175 goto repeat;
117} 176}
118 177
119/* we are using it only for SMP init */
120
121void unhash_process(struct task_struct *p)
122{
123 struct dentry *proc_dentry;
124
125 spin_lock(&p->proc_lock);
126 proc_dentry = proc_pid_unhash(p);
127 write_lock_irq(&tasklist_lock);
128 __unhash_process(p);
129 write_unlock_irq(&tasklist_lock);
130 spin_unlock(&p->proc_lock);
131 proc_pid_flush(proc_dentry);
132}
133
134/* 178/*
135 * This checks not only the pgrp, but falls back on the pid if no 179 * This checks not only the pgrp, but falls back on the pid if no
136 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly 180 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
@@ -238,10 +282,10 @@ static void reparent_to_init(void)
238 282
239 ptrace_unlink(current); 283 ptrace_unlink(current);
240 /* Reparent to init */ 284 /* Reparent to init */
241 REMOVE_LINKS(current); 285 remove_parent(current);
242 current->parent = child_reaper; 286 current->parent = child_reaper;
243 current->real_parent = child_reaper; 287 current->real_parent = child_reaper;
244 SET_LINKS(current); 288 add_parent(current);
245 289
246 /* Set the exit signal to SIGCHLD so we signal init on exit */ 290 /* Set the exit signal to SIGCHLD so we signal init on exit */
247 current->exit_signal = SIGCHLD; 291 current->exit_signal = SIGCHLD;
@@ -538,13 +582,13 @@ static void exit_mm(struct task_struct * tsk)
538 mmput(mm); 582 mmput(mm);
539} 583}
540 584
541static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper) 585static inline void choose_new_parent(task_t *p, task_t *reaper)
542{ 586{
543 /* 587 /*
544 * Make sure we're not reparenting to ourselves and that 588 * Make sure we're not reparenting to ourselves and that
545 * the parent is not a zombie. 589 * the parent is not a zombie.
546 */ 590 */
547 BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE); 591 BUG_ON(p == reaper || reaper->exit_state);
548 p->real_parent = reaper; 592 p->real_parent = reaper;
549} 593}
550 594
@@ -569,9 +613,9 @@ static void reparent_thread(task_t *p, task_t *father, int traced)
569 * anyway, so let go of it. 613 * anyway, so let go of it.
570 */ 614 */
571 p->ptrace = 0; 615 p->ptrace = 0;
572 list_del_init(&p->sibling); 616 remove_parent(p);
573 p->parent = p->real_parent; 617 p->parent = p->real_parent;
574 list_add_tail(&p->sibling, &p->parent->children); 618 add_parent(p);
575 619
576 /* If we'd notified the old parent about this child's death, 620 /* If we'd notified the old parent about this child's death,
577 * also notify the new parent. 621 * also notify the new parent.
@@ -645,7 +689,7 @@ static void forget_original_parent(struct task_struct * father,
645 689
646 if (father == p->real_parent) { 690 if (father == p->real_parent) {
647 /* reparent with a reaper, real father it's us */ 691 /* reparent with a reaper, real father it's us */
648 choose_new_parent(p, reaper, child_reaper); 692 choose_new_parent(p, reaper);
649 reparent_thread(p, father, 0); 693 reparent_thread(p, father, 0);
650 } else { 694 } else {
651 /* reparent ptraced task to its real parent */ 695 /* reparent ptraced task to its real parent */
@@ -666,7 +710,7 @@ static void forget_original_parent(struct task_struct * father,
666 } 710 }
667 list_for_each_safe(_p, _n, &father->ptrace_children) { 711 list_for_each_safe(_p, _n, &father->ptrace_children) {
668 p = list_entry(_p,struct task_struct,ptrace_list); 712 p = list_entry(_p,struct task_struct,ptrace_list);
669 choose_new_parent(p, reaper, child_reaper); 713 choose_new_parent(p, reaper);
670 reparent_thread(p, father, 1); 714 reparent_thread(p, father, 1);
671 } 715 }
672} 716}
@@ -807,7 +851,7 @@ fastcall NORET_TYPE void do_exit(long code)
807 panic("Aiee, killing interrupt handler!"); 851 panic("Aiee, killing interrupt handler!");
808 if (unlikely(!tsk->pid)) 852 if (unlikely(!tsk->pid))
809 panic("Attempted to kill the idle task!"); 853 panic("Attempted to kill the idle task!");
810 if (unlikely(tsk->pid == 1)) 854 if (unlikely(tsk == child_reaper))
811 panic("Attempted to kill init!"); 855 panic("Attempted to kill init!");
812 856
813 if (unlikely(current->ptrace & PT_TRACE_EXIT)) { 857 if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
@@ -920,13 +964,6 @@ asmlinkage long sys_exit(int error_code)
920 do_exit((error_code&0xff)<<8); 964 do_exit((error_code&0xff)<<8);
921} 965}
922 966
923task_t fastcall *next_thread(const task_t *p)
924{
925 return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
926}
927
928EXPORT_SYMBOL(next_thread);
929
930/* 967/*
931 * Take down every thread in the group. This is called by fatal signals 968 * Take down every thread in the group. This is called by fatal signals
932 * as well as by sys_exit_group (below). 969 * as well as by sys_exit_group (below).
@@ -941,7 +978,6 @@ do_group_exit(int exit_code)
941 else if (!thread_group_empty(current)) { 978 else if (!thread_group_empty(current)) {
942 struct signal_struct *const sig = current->signal; 979 struct signal_struct *const sig = current->signal;
943 struct sighand_struct *const sighand = current->sighand; 980 struct sighand_struct *const sighand = current->sighand;
944 read_lock(&tasklist_lock);
945 spin_lock_irq(&sighand->siglock); 981 spin_lock_irq(&sighand->siglock);
946 if (sig->flags & SIGNAL_GROUP_EXIT) 982 if (sig->flags & SIGNAL_GROUP_EXIT)
947 /* Another thread got here before we took the lock. */ 983 /* Another thread got here before we took the lock. */
@@ -951,7 +987,6 @@ do_group_exit(int exit_code)
951 zap_other_threads(current); 987 zap_other_threads(current);
952 } 988 }
953 spin_unlock_irq(&sighand->siglock); 989 spin_unlock_irq(&sighand->siglock);
954 read_unlock(&tasklist_lock);
955 } 990 }
956 991
957 do_exit(exit_code); 992 do_exit(exit_code);
@@ -1281,7 +1316,7 @@ bail_ref:
1281 1316
1282 /* move to end of parent's list to avoid starvation */ 1317 /* move to end of parent's list to avoid starvation */
1283 remove_parent(p); 1318 remove_parent(p);
1284 add_parent(p, p->parent); 1319 add_parent(p);
1285 1320
1286 write_unlock_irq(&tasklist_lock); 1321 write_unlock_irq(&tasklist_lock);
1287 1322
diff --git a/kernel/fork.c b/kernel/fork.c
index c49bd193b058..b3f7a1bb5e55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -84,7 +84,7 @@ static kmem_cache_t *task_struct_cachep;
84#endif 84#endif
85 85
86/* SLAB cache for signal_struct structures (tsk->signal) */ 86/* SLAB cache for signal_struct structures (tsk->signal) */
87kmem_cache_t *signal_cachep; 87static kmem_cache_t *signal_cachep;
88 88
89/* SLAB cache for sighand_struct structures (tsk->sighand) */ 89/* SLAB cache for sighand_struct structures (tsk->sighand) */
90kmem_cache_t *sighand_cachep; 90kmem_cache_t *sighand_cachep;
@@ -786,14 +786,6 @@ int unshare_files(void)
786 786
787EXPORT_SYMBOL(unshare_files); 787EXPORT_SYMBOL(unshare_files);
788 788
789void sighand_free_cb(struct rcu_head *rhp)
790{
791 struct sighand_struct *sp;
792
793 sp = container_of(rhp, struct sighand_struct, rcu);
794 kmem_cache_free(sighand_cachep, sp);
795}
796
797static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk) 789static inline int copy_sighand(unsigned long clone_flags, struct task_struct * tsk)
798{ 790{
799 struct sighand_struct *sig; 791 struct sighand_struct *sig;
@@ -806,12 +798,17 @@ static inline int copy_sighand(unsigned long clone_flags, struct task_struct * t
806 rcu_assign_pointer(tsk->sighand, sig); 798 rcu_assign_pointer(tsk->sighand, sig);
807 if (!sig) 799 if (!sig)
808 return -ENOMEM; 800 return -ENOMEM;
809 spin_lock_init(&sig->siglock);
810 atomic_set(&sig->count, 1); 801 atomic_set(&sig->count, 1);
811 memcpy(sig->action, current->sighand->action, sizeof(sig->action)); 802 memcpy(sig->action, current->sighand->action, sizeof(sig->action));
812 return 0; 803 return 0;
813} 804}
814 805
806void __cleanup_sighand(struct sighand_struct *sighand)
807{
808 if (atomic_dec_and_test(&sighand->count))
809 kmem_cache_free(sighand_cachep, sighand);
810}
811
815static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk) 812static inline int copy_signal(unsigned long clone_flags, struct task_struct * tsk)
816{ 813{
817 struct signal_struct *sig; 814 struct signal_struct *sig;
@@ -881,6 +878,22 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
881 return 0; 878 return 0;
882} 879}
883 880
881void __cleanup_signal(struct signal_struct *sig)
882{
883 exit_thread_group_keys(sig);
884 kmem_cache_free(signal_cachep, sig);
885}
886
887static inline void cleanup_signal(struct task_struct *tsk)
888{
889 struct signal_struct *sig = tsk->signal;
890
891 atomic_dec(&sig->live);
892
893 if (atomic_dec_and_test(&sig->count))
894 __cleanup_signal(sig);
895}
896
884static inline void copy_flags(unsigned long clone_flags, struct task_struct *p) 897static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
885{ 898{
886 unsigned long new_flags = p->flags; 899 unsigned long new_flags = p->flags;
@@ -1095,6 +1108,7 @@ static task_t *copy_process(unsigned long clone_flags,
1095 * We dont wake it up yet. 1108 * We dont wake it up yet.
1096 */ 1109 */
1097 p->group_leader = p; 1110 p->group_leader = p;
1111 INIT_LIST_HEAD(&p->thread_group);
1098 INIT_LIST_HEAD(&p->ptrace_children); 1112 INIT_LIST_HEAD(&p->ptrace_children);
1099 INIT_LIST_HEAD(&p->ptrace_list); 1113 INIT_LIST_HEAD(&p->ptrace_list);
1100 1114
@@ -1118,16 +1132,6 @@ static task_t *copy_process(unsigned long clone_flags,
1118 !cpu_online(task_cpu(p)))) 1132 !cpu_online(task_cpu(p))))
1119 set_task_cpu(p, smp_processor_id()); 1133 set_task_cpu(p, smp_processor_id());
1120 1134
1121 /*
1122 * Check for pending SIGKILL! The new thread should not be allowed
1123 * to slip out of an OOM kill. (or normal SIGKILL.)
1124 */
1125 if (sigismember(&current->pending.signal, SIGKILL)) {
1126 write_unlock_irq(&tasklist_lock);
1127 retval = -EINTR;
1128 goto bad_fork_cleanup_namespace;
1129 }
1130
1131 /* CLONE_PARENT re-uses the old parent */ 1135 /* CLONE_PARENT re-uses the old parent */
1132 if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) 1136 if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
1133 p->real_parent = current->real_parent; 1137 p->real_parent = current->real_parent;
@@ -1136,6 +1140,23 @@ static task_t *copy_process(unsigned long clone_flags,
1136 p->parent = p->real_parent; 1140 p->parent = p->real_parent;
1137 1141
1138 spin_lock(&current->sighand->siglock); 1142 spin_lock(&current->sighand->siglock);
1143
1144 /*
1145 * Process group and session signals need to be delivered to just the
1146 * parent before the fork or both the parent and the child after the
1147 * fork. Restart if a signal comes in before we add the new process to
1148 * its process group.
1149 * A fatal signal pending means that current will exit, so the new
1150 * thread can't slip out of an OOM kill (or normal SIGKILL).
1151 */
1152 recalc_sigpending();
1153 if (signal_pending(current)) {
1154 spin_unlock(&current->sighand->siglock);
1155 write_unlock_irq(&tasklist_lock);
1156 retval = -ERESTARTNOINTR;
1157 goto bad_fork_cleanup_namespace;
1158 }
1159
1139 if (clone_flags & CLONE_THREAD) { 1160 if (clone_flags & CLONE_THREAD) {
1140 /* 1161 /*
1141 * Important: if an exit-all has been started then 1162 * Important: if an exit-all has been started then
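-ERESTARTNOINTR is the restart code the arch signal-delivery path honors unconditionally, so userspace never sees this bail-out from fork(): once the pending signal has been dealt with, the trap is simply re-executed. A loose sketch of that mechanism (the struct and its fields are illustrative stand-ins for the arch's pt_regs handling, not the literal code):

#include <linux/errno.h>	/* ERESTARTNOINTR */

struct regs_sketch { long ax; long orig_ax; long ip; };

static void maybe_restart_sketch(struct regs_sketch *regs)
{
	if (regs->ax == -ERESTARTNOINTR) {
		regs->ax = regs->orig_ax;	/* restore the syscall number */
		regs->ip -= 2;			/* back up over the trap insn */
	}
}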
@@ -1148,17 +1169,9 @@ static task_t *copy_process(unsigned long clone_flags,
1148 retval = -EAGAIN; 1169 retval = -EAGAIN;
1149 goto bad_fork_cleanup_namespace; 1170 goto bad_fork_cleanup_namespace;
1150 } 1171 }
1151 p->group_leader = current->group_leader;
1152 1172
1153 if (current->signal->group_stop_count > 0) { 1173 p->group_leader = current->group_leader;
1154 /* 1174 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1155 * There is an all-stop in progress for the group.
1156 * We ourselves will stop as soon as we check signals.
1157 * Make the new thread part of that group stop too.
1158 */
1159 current->signal->group_stop_count++;
1160 set_tsk_thread_flag(p, TIF_SIGPENDING);
1161 }
1162 1175
1163 if (!cputime_eq(current->signal->it_virt_expires, 1176 if (!cputime_eq(current->signal->it_virt_expires,
1164 cputime_zero) || 1177 cputime_zero) ||
@@ -1181,23 +1194,25 @@ static task_t *copy_process(unsigned long clone_flags,
1181 */ 1194 */
1182 p->ioprio = current->ioprio; 1195 p->ioprio = current->ioprio;
1183 1196
1184 SET_LINKS(p); 1197 if (likely(p->pid)) {
1185 if (unlikely(p->ptrace & PT_PTRACED)) 1198 add_parent(p);
1186 __ptrace_link(p, current->parent); 1199 if (unlikely(p->ptrace & PT_PTRACED))
1187 1200 __ptrace_link(p, current->parent);
1188 if (thread_group_leader(p)) { 1201
1189 p->signal->tty = current->signal->tty; 1202 if (thread_group_leader(p)) {
1190 p->signal->pgrp = process_group(current); 1203 p->signal->tty = current->signal->tty;
1191 p->signal->session = current->signal->session; 1204 p->signal->pgrp = process_group(current);
1192 attach_pid(p, PIDTYPE_PGID, process_group(p)); 1205 p->signal->session = current->signal->session;
1193 attach_pid(p, PIDTYPE_SID, p->signal->session); 1206 attach_pid(p, PIDTYPE_PGID, process_group(p));
1194 if (p->pid) 1207 attach_pid(p, PIDTYPE_SID, p->signal->session);
1208
1209 list_add_tail(&p->tasks, &init_task.tasks);
1195 __get_cpu_var(process_counts)++; 1210 __get_cpu_var(process_counts)++;
1211 }
1212 attach_pid(p, PIDTYPE_PID, p->pid);
1213 nr_threads++;
1196 } 1214 }
1197 attach_pid(p, PIDTYPE_TGID, p->tgid);
1198 attach_pid(p, PIDTYPE_PID, p->pid);
1199 1215
1200 nr_threads++;
1201 total_forks++; 1216 total_forks++;
1202 spin_unlock(&current->sighand->siglock); 1217 spin_unlock(&current->sighand->siglock);
1203 write_unlock_irq(&tasklist_lock); 1218 write_unlock_irq(&tasklist_lock);
@@ -1212,9 +1227,9 @@ bad_fork_cleanup_mm:
1212 if (p->mm) 1227 if (p->mm)
1213 mmput(p->mm); 1228 mmput(p->mm);
1214bad_fork_cleanup_signal: 1229bad_fork_cleanup_signal:
1215 exit_signal(p); 1230 cleanup_signal(p);
1216bad_fork_cleanup_sighand: 1231bad_fork_cleanup_sighand:
1217 exit_sighand(p); 1232 __cleanup_sighand(p->sighand);
1218bad_fork_cleanup_fs: 1233bad_fork_cleanup_fs:
1219 exit_fs(p); /* blocking */ 1234 exit_fs(p); /* blocking */
1220bad_fork_cleanup_files: 1235bad_fork_cleanup_files:
@@ -1261,7 +1276,7 @@ task_t * __devinit fork_idle(int cpu)
1261 if (!task) 1276 if (!task)
1262 return ERR_PTR(-ENOMEM); 1277 return ERR_PTR(-ENOMEM);
1263 init_idle(task, cpu); 1278 init_idle(task, cpu);
1264 unhash_process(task); 1279
1265 return task; 1280 return task;
1266} 1281}
1267 1282
@@ -1353,11 +1368,21 @@ long do_fork(unsigned long clone_flags,
1353#define ARCH_MIN_MMSTRUCT_ALIGN 0 1368#define ARCH_MIN_MMSTRUCT_ALIGN 0
1354#endif 1369#endif
1355 1370
1371static void sighand_ctor(void *data, kmem_cache_t *cachep, unsigned long flags)
1372{
1373 struct sighand_struct *sighand = data;
1374
1375 if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
1376 SLAB_CTOR_CONSTRUCTOR)
1377 spin_lock_init(&sighand->siglock);
1378}
1379
1356void __init proc_caches_init(void) 1380void __init proc_caches_init(void)
1357{ 1381{
1358 sighand_cachep = kmem_cache_create("sighand_cache", 1382 sighand_cachep = kmem_cache_create("sighand_cache",
1359 sizeof(struct sighand_struct), 0, 1383 sizeof(struct sighand_struct), 0,
1360 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1384 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
1385 sighand_ctor, NULL);
1361 signal_cachep = kmem_cache_create("signal_cache", 1386 signal_cachep = kmem_cache_create("signal_cache",
1362 sizeof(struct signal_struct), 0, 1387 sizeof(struct signal_struct), 0,
1363 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL); 1388 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
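SLAB_DESTROY_BY_RCU together with the once-per-slab-object ctor is what makes a lockless tsk->sighand dereference safe: freed memory remains a valid, lock-initialized sighand_struct until a grace period passes, so a racing reader may take the siglock and then recheck whether the object still belongs to the task. A plausible shape for lock_task_sighand(), declared in sched.h earlier in this diff (this body is a hedged reconstruction, not necessarily the committed one):

struct sighand_struct *lock_task_sighand_sketch(struct task_struct *tsk,
						unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			break;			/* task has exited */
		}
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;			/* still this task's sighand */
		}
		/* the object was freed and reused under us: retry */
		spin_unlock_irqrestore(&sighand->siglock, *flags);
		rcu_read_unlock();
	}
	return sighand;
}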
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 51a892063aaa..20a997c73c3d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -170,7 +170,7 @@ static int wait_for_helper(void *data)
170 sa.sa.sa_handler = SIG_IGN; 170 sa.sa.sa_handler = SIG_IGN;
171 sa.sa.sa_flags = 0; 171 sa.sa.sa_flags = 0;
172 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD)); 172 siginitset(&sa.sa.sa_mask, sigmask(SIGCHLD));
173 do_sigaction(SIGCHLD, &sa, (struct k_sigaction *)0); 173 do_sigaction(SIGCHLD, &sa, NULL);
174 allow_signal(SIGCHLD); 174 allow_signal(SIGCHLD);
175 175
176 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD); 176 pid = kernel_thread(____call_usermodehelper, sub_info, SIGCHLD);
diff --git a/kernel/pid.c b/kernel/pid.c
index 1acc07246991..a9f2dfd006d2 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -218,36 +218,6 @@ task_t *find_task_by_pid_type(int type, int nr)
218EXPORT_SYMBOL(find_task_by_pid_type); 218EXPORT_SYMBOL(find_task_by_pid_type);
219 219
220/* 220/*
221 * This function switches the PIDs if a non-leader thread calls
222 * sys_execve() - this must be done without releasing the PID.
223 * (which a detach_pid() would eventually do.)
224 */
225void switch_exec_pids(task_t *leader, task_t *thread)
226{
227 __detach_pid(leader, PIDTYPE_PID);
228 __detach_pid(leader, PIDTYPE_TGID);
229 __detach_pid(leader, PIDTYPE_PGID);
230 __detach_pid(leader, PIDTYPE_SID);
231
232 __detach_pid(thread, PIDTYPE_PID);
233 __detach_pid(thread, PIDTYPE_TGID);
234
235 leader->pid = leader->tgid = thread->pid;
236 thread->pid = thread->tgid;
237
238 attach_pid(thread, PIDTYPE_PID, thread->pid);
239 attach_pid(thread, PIDTYPE_TGID, thread->tgid);
240 attach_pid(thread, PIDTYPE_PGID, thread->signal->pgrp);
241 attach_pid(thread, PIDTYPE_SID, thread->signal->session);
242 list_add_tail(&thread->tasks, &init_task.tasks);
243
244 attach_pid(leader, PIDTYPE_PID, leader->pid);
245 attach_pid(leader, PIDTYPE_TGID, leader->tgid);
246 attach_pid(leader, PIDTYPE_PGID, leader->signal->pgrp);
247 attach_pid(leader, PIDTYPE_SID, leader->signal->session);
248}
249
250/*
251 * The pid hash table is scaled according to the amount of memory in the 221 * The pid hash table is scaled according to the amount of memory in the
252 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or 222 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
253 * more. 223 * more.
@@ -277,16 +247,8 @@ void __init pidhash_init(void)
277 247
278void __init pidmap_init(void) 248void __init pidmap_init(void)
279{ 249{
280 int i;
281
282 pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL); 250 pidmap_array->page = (void *)get_zeroed_page(GFP_KERNEL);
251 /* Reserve PID 0. We never call free_pidmap(0) */
283 set_bit(0, pidmap_array->page); 252 set_bit(0, pidmap_array->page);
284 atomic_dec(&pidmap_array->nr_free); 253 atomic_dec(&pidmap_array->nr_free);
285
286 /*
287 * Allocate PID 0, and hash it via all PID types:
288 */
289
290 for (i = 0; i < PIDTYPE_MAX; i++)
291 attach_pid(current, i, 0);
292} 254}
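With switch_exec_pids() removed, its only job -- letting a non-leader thread take over the leader's PID during exec -- falls to de_thread() in the fs/exec.c part of this patch, which lies outside this excerpt. Reconstructed from the removed helper above purely for orientation (a sketch of the thread side only; consult the actual exec.c hunk for the authoritative sequence):

	/*
	 * The exec'ing thread assumes the old leader's identity;
	 * ->tgid already holds the old leader's pid.
	 */
	__detach_pid(current, PIDTYPE_PID);
	__detach_pid(current, PIDTYPE_TGID);
	current->pid = current->tgid;
	attach_pid(current, PIDTYPE_PID,  current->pid);
	attach_pid(current, PIDTYPE_TGID, current->tgid);
	attach_pid(current, PIDTYPE_PGID, current->signal->pgrp);
	attach_pid(current, PIDTYPE_SID,  current->signal->session);
	list_add_tail(&current->tasks, &init_task.tasks);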
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index d95a72c9279d..86a7f6c60cb2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -35,9 +35,9 @@ void __ptrace_link(task_t *child, task_t *new_parent)
35 if (child->parent == new_parent) 35 if (child->parent == new_parent)
36 return; 36 return;
37 list_add(&child->ptrace_list, &child->parent->ptrace_children); 37 list_add(&child->ptrace_list, &child->parent->ptrace_children);
38 REMOVE_LINKS(child); 38 remove_parent(child);
39 child->parent = new_parent; 39 child->parent = new_parent;
40 SET_LINKS(child); 40 add_parent(child);
41} 41}
42 42
43/* 43/*
@@ -77,9 +77,9 @@ void __ptrace_unlink(task_t *child)
77 child->ptrace = 0; 77 child->ptrace = 0;
78 if (!list_empty(&child->ptrace_list)) { 78 if (!list_empty(&child->ptrace_list)) {
79 list_del_init(&child->ptrace_list); 79 list_del_init(&child->ptrace_list);
80 REMOVE_LINKS(child); 80 remove_parent(child);
81 child->parent = child->real_parent; 81 child->parent = child->real_parent;
82 SET_LINKS(child); 82 add_parent(child);
83 } 83 }
84 84
85 ptrace_untrace(child); 85 ptrace_untrace(child);
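remove_parent() and add_parent() replace the heavyweight REMOVE_LINKS()/SET_LINKS() pair, which also unhashed and rehashed the task on the global tasks list -- unnecessary here, since ptrace re-parenting only moves the child between ->children lists. As defined by the include/linux/sched.h part of this patch (quoted from memory, so treat as a sketch):

	#define remove_parent(p)	list_del_init(&(p)->sibling)
	#define add_parent(p)	list_add_tail(&(p)->sibling, &(p)->parent->children)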
diff --git a/kernel/signal.c b/kernel/signal.c
index 75f7341b0c39..4922928d91f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -22,7 +22,6 @@
22#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/syscalls.h> 23#include <linux/syscalls.h>
24#include <linux/ptrace.h> 24#include <linux/ptrace.h>
25#include <linux/posix-timers.h>
26#include <linux/signal.h> 25#include <linux/signal.h>
27#include <linux/audit.h> 26#include <linux/audit.h>
28#include <linux/capability.h> 27#include <linux/capability.h>
@@ -147,6 +146,8 @@ static kmem_cache_t *sigqueue_cachep;
147#define sig_kernel_stop(sig) \ 146#define sig_kernel_stop(sig) \
148 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) 147 (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK))
149 148
149#define sig_needs_tasklist(sig) ((sig) == SIGCONT)
150
150#define sig_user_defined(t, signr) \ 151#define sig_user_defined(t, signr) \
151 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ 152 (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
152 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) 153 ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
@@ -292,7 +293,7 @@ static void __sigqueue_free(struct sigqueue *q)
292 kmem_cache_free(sigqueue_cachep, q); 293 kmem_cache_free(sigqueue_cachep, q);
293} 294}
294 295
295static void flush_sigqueue(struct sigpending *queue) 296void flush_sigqueue(struct sigpending *queue)
296{ 297{
297 struct sigqueue *q; 298 struct sigqueue *q;
298 299
@@ -307,9 +308,7 @@ static void flush_sigqueue(struct sigpending *queue)
307/* 308/*
308 * Flush all pending signals for a task. 309 * Flush all pending signals for a task.
309 */ 310 */
310 311void flush_signals(struct task_struct *t)
311void
312flush_signals(struct task_struct *t)
313{ 312{
314 unsigned long flags; 313 unsigned long flags;
315 314
@@ -321,109 +320,6 @@ flush_signals(struct task_struct *t)
321} 320}
322 321
323/* 322/*
324 * This function expects the tasklist_lock write-locked.
325 */
326void __exit_sighand(struct task_struct *tsk)
327{
328 struct sighand_struct * sighand = tsk->sighand;
329
330 /* Ok, we're done with the signal handlers */
331 tsk->sighand = NULL;
332 if (atomic_dec_and_test(&sighand->count))
333 sighand_free(sighand);
334}
335
336void exit_sighand(struct task_struct *tsk)
337{
338 write_lock_irq(&tasklist_lock);
339 rcu_read_lock();
340 if (tsk->sighand != NULL) {
341 struct sighand_struct *sighand = rcu_dereference(tsk->sighand);
342 spin_lock(&sighand->siglock);
343 __exit_sighand(tsk);
344 spin_unlock(&sighand->siglock);
345 }
346 rcu_read_unlock();
347 write_unlock_irq(&tasklist_lock);
348}
349
350/*
351 * This function expects the tasklist_lock write-locked.
352 */
353void __exit_signal(struct task_struct *tsk)
354{
355 struct signal_struct * sig = tsk->signal;
356 struct sighand_struct * sighand;
357
358 if (!sig)
359 BUG();
360 if (!atomic_read(&sig->count))
361 BUG();
362 rcu_read_lock();
363 sighand = rcu_dereference(tsk->sighand);
364 spin_lock(&sighand->siglock);
365 posix_cpu_timers_exit(tsk);
366 if (atomic_dec_and_test(&sig->count)) {
367 posix_cpu_timers_exit_group(tsk);
368 tsk->signal = NULL;
369 __exit_sighand(tsk);
370 spin_unlock(&sighand->siglock);
371 flush_sigqueue(&sig->shared_pending);
372 } else {
373 /*
374 * If there is any task waiting for the group exit
375 * then notify it:
376 */
377 if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
378 wake_up_process(sig->group_exit_task);
379 sig->group_exit_task = NULL;
380 }
381 if (tsk == sig->curr_target)
382 sig->curr_target = next_thread(tsk);
383 tsk->signal = NULL;
384 /*
385 * Accumulate here the counters for all threads but the
386 * group leader as they die, so they can be added into
387 * the process-wide totals when those are taken.
388 * The group leader stays around as a zombie as long
389 * as there are other threads. When it gets reaped,
390 * the exit.c code will add its counts into these totals.
391 * We won't ever get here for the group leader, since it
392 * will have been the last reference on the signal_struct.
393 */
394 sig->utime = cputime_add(sig->utime, tsk->utime);
395 sig->stime = cputime_add(sig->stime, tsk->stime);
396 sig->min_flt += tsk->min_flt;
397 sig->maj_flt += tsk->maj_flt;
398 sig->nvcsw += tsk->nvcsw;
399 sig->nivcsw += tsk->nivcsw;
400 sig->sched_time += tsk->sched_time;
401 __exit_sighand(tsk);
402 spin_unlock(&sighand->siglock);
403 sig = NULL; /* Marker for below. */
404 }
405 rcu_read_unlock();
406 clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
407 flush_sigqueue(&tsk->pending);
408 if (sig) {
409 /*
410 * We are cleaning up the signal_struct here.
411 */
412 exit_thread_group_keys(sig);
413 kmem_cache_free(signal_cachep, sig);
414 }
415}
416
417void exit_signal(struct task_struct *tsk)
418{
419 atomic_dec(&tsk->signal->live);
420
421 write_lock_irq(&tasklist_lock);
422 __exit_signal(tsk);
423 write_unlock_irq(&tasklist_lock);
424}
425
426/*
427 * Flush all handlers for a task. 323 * Flush all handlers for a task.
428 */ 324 */
429 325
@@ -695,9 +591,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
695} 591}
696 592
697/* forward decl */ 593/* forward decl */
698static void do_notify_parent_cldstop(struct task_struct *tsk, 594static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
699 int to_self,
700 int why);
701 595
702/* 596/*
703 * Handle magic process-wide effects of stop/continue signals. 597 * Handle magic process-wide effects of stop/continue signals.
@@ -747,7 +641,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
747 p->signal->group_stop_count = 0; 641 p->signal->group_stop_count = 0;
748 p->signal->flags = SIGNAL_STOP_CONTINUED; 642 p->signal->flags = SIGNAL_STOP_CONTINUED;
749 spin_unlock(&p->sighand->siglock); 643 spin_unlock(&p->sighand->siglock);
750 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_STOPPED); 644 do_notify_parent_cldstop(p, CLD_STOPPED);
751 spin_lock(&p->sighand->siglock); 645 spin_lock(&p->sighand->siglock);
752 } 646 }
753 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); 647 rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
@@ -788,7 +682,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
788 p->signal->flags = SIGNAL_STOP_CONTINUED; 682 p->signal->flags = SIGNAL_STOP_CONTINUED;
789 p->signal->group_exit_code = 0; 683 p->signal->group_exit_code = 0;
790 spin_unlock(&p->sighand->siglock); 684 spin_unlock(&p->sighand->siglock);
791 do_notify_parent_cldstop(p, (p->ptrace & PT_PTRACED), CLD_CONTINUED); 685 do_notify_parent_cldstop(p, CLD_CONTINUED);
792 spin_lock(&p->sighand->siglock); 686 spin_lock(&p->sighand->siglock);
793 } else { 687 } else {
794 /* 688 /*
@@ -1120,27 +1014,37 @@ void zap_other_threads(struct task_struct *p)
1120/* 1014/*
1121 * Must be called under rcu_read_lock() or with tasklist_lock read-held. 1015 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
1122 */ 1016 */
1017struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1018{
1019 struct sighand_struct *sighand;
1020
1021 for (;;) {
1022 sighand = rcu_dereference(tsk->sighand);
1023 if (unlikely(sighand == NULL))
1024 break;
1025
1026 spin_lock_irqsave(&sighand->siglock, *flags);
1027 if (likely(sighand == tsk->sighand))
1028 break;
1029 spin_unlock_irqrestore(&sighand->siglock, *flags);
1030 }
1031
1032 return sighand;
1033}
1034
1123int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) 1035int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1124{ 1036{
1125 unsigned long flags; 1037 unsigned long flags;
1126 struct sighand_struct *sp;
1127 int ret; 1038 int ret;
1128 1039
1129retry:
1130 ret = check_kill_permission(sig, info, p); 1040 ret = check_kill_permission(sig, info, p);
1131 if (!ret && sig && (sp = rcu_dereference(p->sighand))) { 1041
1132 spin_lock_irqsave(&sp->siglock, flags); 1042 if (!ret && sig) {
1133 if (p->sighand != sp) { 1043 ret = -ESRCH;
1134 spin_unlock_irqrestore(&sp->siglock, flags); 1044 if (lock_task_sighand(p, &flags)) {
1135 goto retry; 1045 ret = __group_send_sig_info(sig, info, p);
1136 } 1046 unlock_task_sighand(p, &flags);
1137 if ((atomic_read(&sp->count) == 0) ||
1138 (atomic_read(&p->usage) == 0)) {
1139 spin_unlock_irqrestore(&sp->siglock, flags);
1140 return -ESRCH;
1141 } 1047 }
1142 ret = __group_send_sig_info(sig, info, p);
1143 spin_unlock_irqrestore(&sp->siglock, flags);
1144 } 1048 }
1145 1049
1146 return ret; 1050 return ret;
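lock_task_sighand() packages the retry loop that group_send_sig_info() used to open-code: dereference ->sighand under RCU protection, take ->siglock, then confirm ->sighand still points at the same structure, looping if exec switched it and returning NULL if exit already cleared it. With sig, info and p as in the function above, a typical caller reads as follows (a sketch; unlock_task_sighand() is the matching spin_unlock_irqrestore() wrapper, added by the include/linux/sched.h part of this patch):

	unsigned long flags;
	int ret = -ESRCH;	/* assume the target is already exiting */

	rcu_read_lock();	/* keeps *sighand allocated across the lookup */
	if (lock_task_sighand(p, &flags)) {
		ret = __group_send_sig_info(sig, info, p);
		unlock_task_sighand(p, &flags);
	}
	rcu_read_unlock();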
@@ -1189,7 +1093,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1189 struct task_struct *p; 1093 struct task_struct *p;
1190 1094
1191 rcu_read_lock(); 1095 rcu_read_lock();
1192 if (unlikely(sig_kernel_stop(sig) || sig == SIGCONT)) { 1096 if (unlikely(sig_needs_tasklist(sig))) {
1193 read_lock(&tasklist_lock); 1097 read_lock(&tasklist_lock);
1194 acquired_tasklist_lock = 1; 1098 acquired_tasklist_lock = 1;
1195 } 1099 }
@@ -1405,12 +1309,10 @@ void sigqueue_free(struct sigqueue *q)
1405 __sigqueue_free(q); 1309 __sigqueue_free(q);
1406} 1310}
1407 1311
1408int 1312int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1409send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1410{ 1313{
1411 unsigned long flags; 1314 unsigned long flags;
1412 int ret = 0; 1315 int ret = 0;
1413 struct sighand_struct *sh;
1414 1316
1415 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); 1317 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1416 1318
@@ -1424,48 +1326,17 @@ send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
1424 */ 1326 */
1425 rcu_read_lock(); 1327 rcu_read_lock();
1426 1328
1427 if (unlikely(p->flags & PF_EXITING)) { 1329 if (!likely(lock_task_sighand(p, &flags))) {
1428 ret = -1; 1330 ret = -1;
1429 goto out_err; 1331 goto out_err;
1430 } 1332 }
1431 1333
1432retry:
1433 sh = rcu_dereference(p->sighand);
1434
1435 spin_lock_irqsave(&sh->siglock, flags);
1436 if (p->sighand != sh) {
1437 /* We raced with exec() in a multithreaded process... */
1438 spin_unlock_irqrestore(&sh->siglock, flags);
1439 goto retry;
1440 }
1441
1442 /*
1443 * We do the check here again to handle the following scenario:
1444 *
1445 * CPU 0 CPU 1
1446 * send_sigqueue
1447 * check PF_EXITING
1448 * interrupt exit code running
1449 * __exit_signal
1450 * lock sighand->siglock
1451 * unlock sighand->siglock
1452 * lock sh->siglock
1453 * add(tsk->pending) flush_sigqueue(tsk->pending)
1454 *
1455 */
1456
1457 if (unlikely(p->flags & PF_EXITING)) {
1458 ret = -1;
1459 goto out;
1460 }
1461
1462 if (unlikely(!list_empty(&q->list))) { 1334 if (unlikely(!list_empty(&q->list))) {
1463 /* 1335 /*
1464 * If an SI_TIMER entry is already queue just increment 1336 * If an SI_TIMER entry is already queue just increment
1465 * the overrun count. 1337 * the overrun count.
1466 */ 1338 */
1467 if (q->info.si_code != SI_TIMER) 1339 BUG_ON(q->info.si_code != SI_TIMER);
1468 BUG();
1469 q->info.si_overrun++; 1340 q->info.si_overrun++;
1470 goto out; 1341 goto out;
1471 } 1342 }
@@ -1481,7 +1352,7 @@ retry:
1481 signal_wake_up(p, sig == SIGKILL); 1352 signal_wake_up(p, sig == SIGKILL);
1482 1353
1483out: 1354out:
1484 spin_unlock_irqrestore(&sh->siglock, flags); 1355 unlock_task_sighand(p, &flags);
1485out_err: 1356out_err:
1486 rcu_read_unlock(); 1357 rcu_read_unlock();
1487 1358
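The removed CPU 0/CPU 1 diagram and the second PF_EXITING test are both subsumed by the single lock_task_sighand() call: __exit_signal(), which this patch relocates to kernel/exit.c, clears ->sighand under ->siglock before flushing the per-task pending queue, so a successful lock guarantees the flush has not yet run and a queued entry cannot be leaked. Condensed from the version of __exit_signal() removed earlier in this file (a sketch of the ordering, not the relocated code verbatim):

	spin_lock(&sighand->siglock);
	/* ... posix_cpu_timers_exit(), counter accumulation ... */
	tsk->sighand = NULL;	/* lock_task_sighand() fails from here on */
	spin_unlock(&sighand->siglock);
	flush_sigqueue(&tsk->pending);	/* no new entries can race in */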
@@ -1613,14 +1484,14 @@ void do_notify_parent(struct task_struct *tsk, int sig)
1613 spin_unlock_irqrestore(&psig->siglock, flags); 1484 spin_unlock_irqrestore(&psig->siglock, flags);
1614} 1485}
1615 1486
1616static void do_notify_parent_cldstop(struct task_struct *tsk, int to_self, int why) 1487static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1617{ 1488{
1618 struct siginfo info; 1489 struct siginfo info;
1619 unsigned long flags; 1490 unsigned long flags;
1620 struct task_struct *parent; 1491 struct task_struct *parent;
1621 struct sighand_struct *sighand; 1492 struct sighand_struct *sighand;
1622 1493
1623 if (to_self) 1494 if (tsk->ptrace & PT_PTRACED)
1624 parent = tsk->parent; 1495 parent = tsk->parent;
1625 else { 1496 else {
1626 tsk = tsk->group_leader; 1497 tsk = tsk->group_leader;
@@ -1695,7 +1566,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1695 !(current->ptrace & PT_ATTACHED)) && 1566 !(current->ptrace & PT_ATTACHED)) &&
1696 (likely(current->parent->signal != current->signal) || 1567 (likely(current->parent->signal != current->signal) ||
1697 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { 1568 !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
1698 do_notify_parent_cldstop(current, 1, CLD_TRAPPED); 1569 do_notify_parent_cldstop(current, CLD_TRAPPED);
1699 read_unlock(&tasklist_lock); 1570 read_unlock(&tasklist_lock);
1700 schedule(); 1571 schedule();
1701 } else { 1572 } else {
@@ -1744,25 +1615,17 @@ void ptrace_notify(int exit_code)
1744static void 1615static void
1745finish_stop(int stop_count) 1616finish_stop(int stop_count)
1746{ 1617{
1747 int to_self;
1748
1749 /* 1618 /*
1750 * If there are no other threads in the group, or if there is 1619 * If there are no other threads in the group, or if there is
1751 * a group stop in progress and we are the last to stop, 1620 * a group stop in progress and we are the last to stop,
1752 * report to the parent. When ptraced, every thread reports itself. 1621 * report to the parent. When ptraced, every thread reports itself.
1753 */ 1622 */
1754 if (stop_count < 0 || (current->ptrace & PT_PTRACED)) 1623 if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
1755 to_self = 1; 1624 read_lock(&tasklist_lock);
1756 else if (stop_count == 0) 1625 do_notify_parent_cldstop(current, CLD_STOPPED);
1757 to_self = 0; 1626 read_unlock(&tasklist_lock);
1758 else 1627 }
1759 goto out;
1760
1761 read_lock(&tasklist_lock);
1762 do_notify_parent_cldstop(current, to_self, CLD_STOPPED);
1763 read_unlock(&tasklist_lock);
1764 1628
1765out:
1766 schedule(); 1629 schedule();
1767 /* 1630 /*
1768 * Now we don't run again until continued. 1631 * Now we don't run again until continued.
@@ -1776,12 +1639,10 @@ out:
1776 * Returns nonzero if we've actually stopped and released the siglock. 1639 * Returns nonzero if we've actually stopped and released the siglock.
1777 * Returns zero if we didn't stop and still hold the siglock. 1640 * Returns zero if we didn't stop and still hold the siglock.
1778 */ 1641 */
1779static int 1642static int do_signal_stop(int signr)
1780do_signal_stop(int signr)
1781{ 1643{
1782 struct signal_struct *sig = current->signal; 1644 struct signal_struct *sig = current->signal;
1783 struct sighand_struct *sighand = current->sighand; 1645 int stop_count;
1784 int stop_count = -1;
1785 1646
1786 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) 1647 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
1787 return 0; 1648 return 0;
@@ -1791,86 +1652,37 @@ do_signal_stop(int signr)
1791 * There is a group stop in progress. We don't need to 1652 * There is a group stop in progress. We don't need to
1792 * start another one. 1653 * start another one.
1793 */ 1654 */
1794 signr = sig->group_exit_code;
1795 stop_count = --sig->group_stop_count; 1655 stop_count = --sig->group_stop_count;
1796 current->exit_code = signr; 1656 } else {
1797 set_current_state(TASK_STOPPED);
1798 if (stop_count == 0)
1799 sig->flags = SIGNAL_STOP_STOPPED;
1800 spin_unlock_irq(&sighand->siglock);
1801 }
1802 else if (thread_group_empty(current)) {
1803 /*
1804 * Lock must be held through transition to stopped state.
1805 */
1806 current->exit_code = current->signal->group_exit_code = signr;
1807 set_current_state(TASK_STOPPED);
1808 sig->flags = SIGNAL_STOP_STOPPED;
1809 spin_unlock_irq(&sighand->siglock);
1810 }
1811 else {
1812 /* 1657 /*
1813 * There is no group stop already in progress. 1658 * There is no group stop already in progress.
1814 * We must initiate one now, but that requires 1659 * We must initiate one now.
1815 * dropping siglock to get both the tasklist lock
1816 * and siglock again in the proper order. Note that
1817 * this allows an intervening SIGCONT to be posted.
1818 * We need to check for that and bail out if necessary.
1819 */ 1660 */
1820 struct task_struct *t; 1661 struct task_struct *t;
1821 1662
1822 spin_unlock_irq(&sighand->siglock); 1663 sig->group_exit_code = signr;
1823
1824 /* signals can be posted during this window */
1825 1664
1826 read_lock(&tasklist_lock); 1665 stop_count = 0;
1827 spin_lock_irq(&sighand->siglock); 1666 for (t = next_thread(current); t != current; t = next_thread(t))
1828
1829 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) {
1830 /* 1667 /*
1831 * Another stop or continue happened while we 1668 * Setting state to TASK_STOPPED for a group
1832 * didn't have the lock. We can just swallow this 1669 * stop is always done with the siglock held,
1833 * signal now. If we raced with a SIGCONT, that 1670 * so this check has no races.
1834 * should have just cleared it now. If we raced
1835 * with another processor delivering a stop signal,
1836 * then the SIGCONT that wakes us up should clear it.
1837 */ 1671 */
1838 read_unlock(&tasklist_lock); 1672 if (!t->exit_state &&
1839 return 0; 1673 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1840 } 1674 stop_count++;
1841 1675 signal_wake_up(t, 0);
1842 if (sig->group_stop_count == 0) { 1676 }
1843 sig->group_exit_code = signr; 1677 sig->group_stop_count = stop_count;
1844 stop_count = 0;
1845 for (t = next_thread(current); t != current;
1846 t = next_thread(t))
1847 /*
1848 * Setting state to TASK_STOPPED for a group
1849 * stop is always done with the siglock held,
1850 * so this check has no races.
1851 */
1852 if (!t->exit_state &&
1853 !(t->state & (TASK_STOPPED|TASK_TRACED))) {
1854 stop_count++;
1855 signal_wake_up(t, 0);
1856 }
1857 sig->group_stop_count = stop_count;
1858 }
1859 else {
1860 /* A race with another thread while unlocked. */
1861 signr = sig->group_exit_code;
1862 stop_count = --sig->group_stop_count;
1863 }
1864
1865 current->exit_code = signr;
1866 set_current_state(TASK_STOPPED);
1867 if (stop_count == 0)
1868 sig->flags = SIGNAL_STOP_STOPPED;
1869
1870 spin_unlock_irq(&sighand->siglock);
1871 read_unlock(&tasklist_lock);
1872 } 1678 }
1873 1679
1680 if (stop_count == 0)
1681 sig->flags = SIGNAL_STOP_STOPPED;
1682 current->exit_code = sig->group_exit_code;
1683 __set_current_state(TASK_STOPPED);
1684
1685 spin_unlock_irq(&current->sighand->siglock);
1874 finish_stop(stop_count); 1686 finish_stop(stop_count);
1875 return 1; 1687 return 1;
1876} 1688}
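The rework is possible because initiating a group stop never needed tasklist_lock in the first place: as the comment above notes, TASK_STOPPED for a group stop is only ever set under ->siglock, so the thread walk is race-free with the lock the signal-delivery path already holds. The caller's contract is unchanged; in get_signal_to_deliver() it is used roughly like this (a sketch reconstructed from the return-value comment above, not the verbatim caller):

	if (sig_kernel_stop(signr)) {
		if (do_signal_stop(signr))
			goto relock;	/* stopped; ->siglock was dropped */
		/*
		 * Returned 0: a racing SIGCONT swallowed the stop and we
		 * still hold ->siglock, so just dequeue the next signal.
		 */
	}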
@@ -1990,7 +1802,7 @@ relock:
1990 continue; 1802 continue;
1991 1803
1992 /* Init gets no signals it doesn't want. */ 1804 /* Init gets no signals it doesn't want. */
1993 if (current->pid == 1) 1805 if (current == child_reaper)
1994 continue; 1806 continue;
1995 1807
1996 if (sig_kernel_stop(signr)) { 1808 if (sig_kernel_stop(signr)) {
@@ -2430,8 +2242,7 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
2430 return kill_proc_info(sig, &info, pid); 2242 return kill_proc_info(sig, &info, pid);
2431} 2243}
2432 2244
2433int 2245int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2434do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2435{ 2246{
2436 struct k_sigaction *k; 2247 struct k_sigaction *k;
2437 sigset_t mask; 2248 sigset_t mask;
@@ -2457,6 +2268,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2457 if (act) { 2268 if (act) {
2458 sigdelsetmask(&act->sa.sa_mask, 2269 sigdelsetmask(&act->sa.sa_mask,
2459 sigmask(SIGKILL) | sigmask(SIGSTOP)); 2270 sigmask(SIGKILL) | sigmask(SIGSTOP));
2271 *k = *act;
2460 /* 2272 /*
2461 * POSIX 3.3.1.3: 2273 * POSIX 3.3.1.3:
2462 * "Setting a signal action to SIG_IGN for a signal that is 2274 * "Setting a signal action to SIG_IGN for a signal that is
@@ -2469,19 +2281,8 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2469 * be discarded, whether or not it is blocked" 2281 * be discarded, whether or not it is blocked"
2470 */ 2282 */
2471 if (act->sa.sa_handler == SIG_IGN || 2283 if (act->sa.sa_handler == SIG_IGN ||
2472 (act->sa.sa_handler == SIG_DFL && 2284 (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
2473 sig_kernel_ignore(sig))) {
2474 /*
2475 * This is a fairly rare case, so we only take the
2476 * tasklist_lock once we're sure we'll need it.
2477 * Now we must do this little unlock and relock
2478 * dance to maintain the lock hierarchy.
2479 */
2480 struct task_struct *t = current; 2285 struct task_struct *t = current;
2481 spin_unlock_irq(&t->sighand->siglock);
2482 read_lock(&tasklist_lock);
2483 spin_lock_irq(&t->sighand->siglock);
2484 *k = *act;
2485 sigemptyset(&mask); 2286 sigemptyset(&mask);
2486 sigaddset(&mask, sig); 2287 sigaddset(&mask, sig);
2487 rm_from_queue_full(&mask, &t->signal->shared_pending); 2288 rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2490,12 +2291,7 @@ do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2490 recalc_sigpending_tsk(t); 2291 recalc_sigpending_tsk(t);
2491 t = next_thread(t); 2292 t = next_thread(t);
2492 } while (t != current); 2293 } while (t != current);
2493 spin_unlock_irq(&current->sighand->siglock);
2494 read_unlock(&tasklist_lock);
2495 return 0;
2496 } 2294 }
2497
2498 *k = *act;
2499 } 2295 }
2500 2296
2501 spin_unlock_irq(&current->sighand->siglock); 2297 spin_unlock_irq(&current->sighand->siglock);
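The POSIX 3.3.1.3 behavior implemented above -- setting the disposition to SIG_IGN (or to SIG_DFL for a default-ignored signal) discards a pending instance even when it is blocked -- is easy to observe from userspace; a minimal self-contained demonstration (ordinary C, not kernel code):

	#include <signal.h>
	#include <stdio.h>

	int main(void)
	{
		sigset_t set, pending;

		/* block SIGUSR1, then make it pending */
		sigemptyset(&set);
		sigaddset(&set, SIGUSR1);
		sigprocmask(SIG_BLOCK, &set, NULL);
		raise(SIGUSR1);

		signal(SIGUSR1, SIG_IGN);	/* pending instance is discarded */

		sigpending(&pending);
		printf("SIGUSR1 still pending: %d\n",
		       sigismember(&pending, SIGUSR1));	/* prints 0 */
		return 0;
	}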
diff --git a/kernel/sys.c b/kernel/sys.c
index c93d37f71aef..7ef7f6054c28 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1202,69 +1202,24 @@ asmlinkage long sys_times(struct tms __user * tbuf)
1202 */ 1202 */
1203 if (tbuf) { 1203 if (tbuf) {
1204 struct tms tmp; 1204 struct tms tmp;
1205 struct task_struct *tsk = current;
1206 struct task_struct *t;
1205 cputime_t utime, stime, cutime, cstime; 1207 cputime_t utime, stime, cutime, cstime;
1206 1208
1207#ifdef CONFIG_SMP 1209 spin_lock_irq(&tsk->sighand->siglock);
1208 if (thread_group_empty(current)) { 1210 utime = tsk->signal->utime;
1209 /* 1211 stime = tsk->signal->stime;
1210 * Single thread case without the use of any locks. 1212 t = tsk;
1211 * 1213 do {
1212 * We may race with release_task if two threads are 1214 utime = cputime_add(utime, t->utime);
1213 * executing. However, release task first adds up the 1215 stime = cputime_add(stime, t->stime);
1214 * counters (__exit_signal) before removing the task 1216 t = next_thread(t);
1215 * from the process tasklist (__unhash_process). 1217 } while (t != tsk);
1216 * __exit_signal also acquires and releases the
1217 * siglock which results in the proper memory ordering
1218 * so that the list modifications are always visible
1219 * after the counters have been updated.
1220 *
1221 * If the counters have been updated by the second thread
1222 * but the thread has not yet been removed from the list
1223 * then the other branch will be executing which will
1224 * block on tasklist_lock until the exit handling of the
1225 * other task is finished.
1226 *
1227 * This also implies that the sighand->siglock cannot
1228 * be held by another processor. So we can also
1229 * skip acquiring that lock.
1230 */
1231 utime = cputime_add(current->signal->utime, current->utime);
1232 stime = cputime_add(current->signal->utime, current->stime);
1233 cutime = current->signal->cutime;
1234 cstime = current->signal->cstime;
1235 } else
1236#endif
1237 {
1238 1218
1239 /* Process with multiple threads */ 1219 cutime = tsk->signal->cutime;
1240 struct task_struct *tsk = current; 1220 cstime = tsk->signal->cstime;
1241 struct task_struct *t; 1221 spin_unlock_irq(&tsk->sighand->siglock);
1242 1222
1243 read_lock(&tasklist_lock);
1244 utime = tsk->signal->utime;
1245 stime = tsk->signal->stime;
1246 t = tsk;
1247 do {
1248 utime = cputime_add(utime, t->utime);
1249 stime = cputime_add(stime, t->stime);
1250 t = next_thread(t);
1251 } while (t != tsk);
1252
1253 /*
1254 * While we have tasklist_lock read-locked, no dying thread
1255 * can be updating current->signal->[us]time. Instead,
1256 * we got their counts included in the live thread loop.
1257 * However, another thread can come in right now and
1258 * do a wait call that updates current->signal->c[us]time.
1259 * To make sure we always see that pair updated atomically,
1260 * we take the siglock around fetching them.
1261 */
1262 spin_lock_irq(&tsk->sighand->siglock);
1263 cutime = tsk->signal->cutime;
1264 cstime = tsk->signal->cstime;
1265 spin_unlock_irq(&tsk->sighand->siglock);
1266 read_unlock(&tasklist_lock);
1267 }
1268 tmp.tms_utime = cputime_to_clock_t(utime); 1223 tmp.tms_utime = cputime_to_clock_t(utime);
1269 tmp.tms_stime = cputime_to_clock_t(stime); 1224 tmp.tms_stime = cputime_to_clock_t(stime);
1270 tmp.tms_cutime = cputime_to_clock_t(cutime); 1225 tmp.tms_cutime = cputime_to_clock_t(cutime);