aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorTrond Myklebust <Trond.Myklebust@netapp.com>2006-07-03 13:49:45 -0400
committerTrond Myklebust <Trond.Myklebust@netapp.com>2006-07-03 13:49:45 -0400
commit026477c1141b67e98e3bd8bdedb7d4b88a3ecd09 (patch)
tree2624a44924c625c367f3cebf937853b9da2de282 /kernel
parent9f2fa466383ce100b90fe52cb4489d7a26bf72a9 (diff)
parent29454dde27d8e340bb1987bad9aa504af7081eba (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/
Diffstat (limited to 'kernel')
-rw-r--r--kernel/acct.c1
-rw-r--r--kernel/audit.c8
-rw-r--r--kernel/audit.h1
-rw-r--r--kernel/auditfilter.c209
-rw-r--r--kernel/auditsc.c65
-rw-r--r--kernel/configs.c1
-rw-r--r--kernel/cpuset.c3
-rw-r--r--kernel/exec_domain.c1
-rw-r--r--kernel/exit.c1
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/futex.c8
-rw-r--r--kernel/irq/Makefile2
-rw-r--r--kernel/irq/autoprobe.c56
-rw-r--r--kernel/irq/chip.c534
-rw-r--r--kernel/irq/handle.c140
-rw-r--r--kernel/irq/internals.h46
-rw-r--r--kernel/irq/manage.c179
-rw-r--r--kernel/irq/migration.c20
-rw-r--r--kernel/irq/proc.c30
-rw-r--r--kernel/irq/resend.c78
-rw-r--r--kernel/irq/spurious.c37
-rw-r--r--kernel/kexec.c6
-rw-r--r--kernel/kmod.c1
-rw-r--r--kernel/ksysfs.c1
-rw-r--r--kernel/module.c1
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/params.c1
-rw-r--r--kernel/power/Kconfig12
-rw-r--r--kernel/printk.c1
-rw-r--r--kernel/profile.c1
-rw-r--r--kernel/resource.c53
-rw-r--r--kernel/sched.c25
-rw-r--r--kernel/signal.c8
-rw-r--r--kernel/spinlock.c1
-rw-r--r--kernel/sys.c1
-rw-r--r--kernel/sysctl.c10
-rw-r--r--kernel/wait.c1
37 files changed, 1217 insertions, 328 deletions
diff --git a/kernel/acct.c b/kernel/acct.c
index 126ca43d5d2b..f18e0b8df3e1 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -43,7 +43,6 @@
43 * a struct file opened for write. Fixed. 2/6/2000, AV. 43 * a struct file opened for write. Fixed. 2/6/2000, AV.
44 */ 44 */
45 45
46#include <linux/config.h>
47#include <linux/mm.h> 46#include <linux/mm.h>
48#include <linux/slab.h> 47#include <linux/slab.h>
49#include <linux/acct.h> 48#include <linux/acct.h>
diff --git a/kernel/audit.c b/kernel/audit.c
index 82443fb433ef..d417ca1db79b 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -445,7 +445,7 @@ void audit_send_reply(int pid, int seq, int type, int done, int multi,
445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit 445 * Check for appropriate CAP_AUDIT_ capabilities on incoming audit
446 * control messages. 446 * control messages.
447 */ 447 */
448static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type) 448static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
449{ 449{
450 int err = 0; 450 int err = 0;
451 451
@@ -459,13 +459,13 @@ static int audit_netlink_ok(kernel_cap_t eff_cap, u16 msg_type)
459 case AUDIT_DEL: 459 case AUDIT_DEL:
460 case AUDIT_DEL_RULE: 460 case AUDIT_DEL_RULE:
461 case AUDIT_SIGNAL_INFO: 461 case AUDIT_SIGNAL_INFO:
462 if (!cap_raised(eff_cap, CAP_AUDIT_CONTROL)) 462 if (security_netlink_recv(skb, CAP_AUDIT_CONTROL))
463 err = -EPERM; 463 err = -EPERM;
464 break; 464 break;
465 case AUDIT_USER: 465 case AUDIT_USER:
466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG: 466 case AUDIT_FIRST_USER_MSG...AUDIT_LAST_USER_MSG:
467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2: 467 case AUDIT_FIRST_USER_MSG2...AUDIT_LAST_USER_MSG2:
468 if (!cap_raised(eff_cap, CAP_AUDIT_WRITE)) 468 if (security_netlink_recv(skb, CAP_AUDIT_WRITE))
469 err = -EPERM; 469 err = -EPERM;
470 break; 470 break;
471 default: /* bad msg */ 471 default: /* bad msg */
@@ -488,7 +488,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
488 char *ctx; 488 char *ctx;
489 u32 len; 489 u32 len;
490 490
491 err = audit_netlink_ok(NETLINK_CB(skb).eff_cap, msg_type); 491 err = audit_netlink_ok(skb, msg_type);
492 if (err) 492 if (err)
493 return err; 493 return err;
494 494
diff --git a/kernel/audit.h b/kernel/audit.h
index 8323e4132a33..6aa33b848cf2 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -81,6 +81,7 @@ struct audit_krule {
81 u32 mask[AUDIT_BITMASK_SIZE]; 81 u32 mask[AUDIT_BITMASK_SIZE];
82 u32 buflen; /* for data alloc on list rules */ 82 u32 buflen; /* for data alloc on list rules */
83 u32 field_count; 83 u32 field_count;
84 char *filterkey; /* ties events to rules */
84 struct audit_field *fields; 85 struct audit_field *fields;
85 struct audit_field *inode_f; /* quick access to an inode field */ 86 struct audit_field *inode_f; /* quick access to an inode field */
86 struct audit_watch *watch; /* associated watch */ 87 struct audit_watch *watch; /* associated watch */
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 4c99d2c586ed..5b4e16276ca0 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -141,6 +141,7 @@ static inline void audit_free_rule(struct audit_entry *e)
141 selinux_audit_rule_free(f->se_rule); 141 selinux_audit_rule_free(f->se_rule);
142 } 142 }
143 kfree(e->rule.fields); 143 kfree(e->rule.fields);
144 kfree(e->rule.filterkey);
144 kfree(e); 145 kfree(e);
145} 146}
146 147
@@ -278,6 +279,29 @@ static int audit_to_watch(struct audit_krule *krule, char *path, int len,
278 return 0; 279 return 0;
279} 280}
280 281
282static __u32 *classes[AUDIT_SYSCALL_CLASSES];
283
284int __init audit_register_class(int class, unsigned *list)
285{
286 __u32 *p = kzalloc(AUDIT_BITMASK_SIZE * sizeof(__u32), GFP_KERNEL);
287 if (!p)
288 return -ENOMEM;
289 while (*list != ~0U) {
290 unsigned n = *list++;
291 if (n >= AUDIT_BITMASK_SIZE * 32 - AUDIT_SYSCALL_CLASSES) {
292 kfree(p);
293 return -EINVAL;
294 }
295 p[AUDIT_WORD(n)] |= AUDIT_BIT(n);
296 }
297 if (class >= AUDIT_SYSCALL_CLASSES || classes[class]) {
298 kfree(p);
299 return -EINVAL;
300 }
301 classes[class] = p;
302 return 0;
303}
304
281/* Common user-space to kernel rule translation. */ 305/* Common user-space to kernel rule translation. */
282static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule) 306static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
283{ 307{
@@ -321,6 +345,22 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
321 for (i = 0; i < AUDIT_BITMASK_SIZE; i++) 345 for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
322 entry->rule.mask[i] = rule->mask[i]; 346 entry->rule.mask[i] = rule->mask[i];
323 347
348 for (i = 0; i < AUDIT_SYSCALL_CLASSES; i++) {
349 int bit = AUDIT_BITMASK_SIZE * 32 - i - 1;
350 __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];
351 __u32 *class;
352
353 if (!(*p & AUDIT_BIT(bit)))
354 continue;
355 *p &= ~AUDIT_BIT(bit);
356 class = classes[i];
357 if (class) {
358 int j;
359 for (j = 0; j < AUDIT_BITMASK_SIZE; j++)
360 entry->rule.mask[j] |= class[j];
361 }
362 }
363
324 return entry; 364 return entry;
325 365
326exit_err: 366exit_err:
@@ -469,11 +509,16 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
469 case AUDIT_ARG2: 509 case AUDIT_ARG2:
470 case AUDIT_ARG3: 510 case AUDIT_ARG3:
471 break; 511 break;
472 case AUDIT_SE_USER: 512 case AUDIT_SUBJ_USER:
473 case AUDIT_SE_ROLE: 513 case AUDIT_SUBJ_ROLE:
474 case AUDIT_SE_TYPE: 514 case AUDIT_SUBJ_TYPE:
475 case AUDIT_SE_SEN: 515 case AUDIT_SUBJ_SEN:
476 case AUDIT_SE_CLR: 516 case AUDIT_SUBJ_CLR:
517 case AUDIT_OBJ_USER:
518 case AUDIT_OBJ_ROLE:
519 case AUDIT_OBJ_TYPE:
520 case AUDIT_OBJ_LEV_LOW:
521 case AUDIT_OBJ_LEV_HIGH:
477 str = audit_unpack_string(&bufp, &remain, f->val); 522 str = audit_unpack_string(&bufp, &remain, f->val);
478 if (IS_ERR(str)) 523 if (IS_ERR(str))
479 goto exit_free; 524 goto exit_free;
@@ -511,6 +556,16 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
511 if (err) 556 if (err)
512 goto exit_free; 557 goto exit_free;
513 break; 558 break;
559 case AUDIT_FILTERKEY:
560 err = -EINVAL;
561 if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
562 goto exit_free;
563 str = audit_unpack_string(&bufp, &remain, f->val);
564 if (IS_ERR(str))
565 goto exit_free;
566 entry->rule.buflen += f->val;
567 entry->rule.filterkey = str;
568 break;
514 default: 569 default:
515 goto exit_free; 570 goto exit_free;
516 } 571 }
@@ -600,11 +655,16 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
600 data->fields[i] = f->type; 655 data->fields[i] = f->type;
601 data->fieldflags[i] = f->op; 656 data->fieldflags[i] = f->op;
602 switch(f->type) { 657 switch(f->type) {
603 case AUDIT_SE_USER: 658 case AUDIT_SUBJ_USER:
604 case AUDIT_SE_ROLE: 659 case AUDIT_SUBJ_ROLE:
605 case AUDIT_SE_TYPE: 660 case AUDIT_SUBJ_TYPE:
606 case AUDIT_SE_SEN: 661 case AUDIT_SUBJ_SEN:
607 case AUDIT_SE_CLR: 662 case AUDIT_SUBJ_CLR:
663 case AUDIT_OBJ_USER:
664 case AUDIT_OBJ_ROLE:
665 case AUDIT_OBJ_TYPE:
666 case AUDIT_OBJ_LEV_LOW:
667 case AUDIT_OBJ_LEV_HIGH:
608 data->buflen += data->values[i] = 668 data->buflen += data->values[i] =
609 audit_pack_string(&bufp, f->se_str); 669 audit_pack_string(&bufp, f->se_str);
610 break; 670 break;
@@ -612,6 +672,10 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
612 data->buflen += data->values[i] = 672 data->buflen += data->values[i] =
613 audit_pack_string(&bufp, krule->watch->path); 673 audit_pack_string(&bufp, krule->watch->path);
614 break; 674 break;
675 case AUDIT_FILTERKEY:
676 data->buflen += data->values[i] =
677 audit_pack_string(&bufp, krule->filterkey);
678 break;
615 default: 679 default:
616 data->values[i] = f->val; 680 data->values[i] = f->val;
617 } 681 }
@@ -639,11 +703,16 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
639 return 1; 703 return 1;
640 704
641 switch(a->fields[i].type) { 705 switch(a->fields[i].type) {
642 case AUDIT_SE_USER: 706 case AUDIT_SUBJ_USER:
643 case AUDIT_SE_ROLE: 707 case AUDIT_SUBJ_ROLE:
644 case AUDIT_SE_TYPE: 708 case AUDIT_SUBJ_TYPE:
645 case AUDIT_SE_SEN: 709 case AUDIT_SUBJ_SEN:
646 case AUDIT_SE_CLR: 710 case AUDIT_SUBJ_CLR:
711 case AUDIT_OBJ_USER:
712 case AUDIT_OBJ_ROLE:
713 case AUDIT_OBJ_TYPE:
714 case AUDIT_OBJ_LEV_LOW:
715 case AUDIT_OBJ_LEV_HIGH:
647 if (strcmp(a->fields[i].se_str, b->fields[i].se_str)) 716 if (strcmp(a->fields[i].se_str, b->fields[i].se_str))
648 return 1; 717 return 1;
649 break; 718 break;
@@ -651,6 +720,11 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
651 if (strcmp(a->watch->path, b->watch->path)) 720 if (strcmp(a->watch->path, b->watch->path))
652 return 1; 721 return 1;
653 break; 722 break;
723 case AUDIT_FILTERKEY:
724 /* both filterkeys exist based on above type compare */
725 if (strcmp(a->filterkey, b->filterkey))
726 return 1;
727 break;
654 default: 728 default:
655 if (a->fields[i].val != b->fields[i].val) 729 if (a->fields[i].val != b->fields[i].val)
656 return 1; 730 return 1;
@@ -730,6 +804,7 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
730 u32 fcount = old->field_count; 804 u32 fcount = old->field_count;
731 struct audit_entry *entry; 805 struct audit_entry *entry;
732 struct audit_krule *new; 806 struct audit_krule *new;
807 char *fk;
733 int i, err = 0; 808 int i, err = 0;
734 809
735 entry = audit_init_entry(fcount); 810 entry = audit_init_entry(fcount);
@@ -753,13 +828,25 @@ static struct audit_entry *audit_dupe_rule(struct audit_krule *old,
753 * the originals will all be freed when the old rule is freed. */ 828 * the originals will all be freed when the old rule is freed. */
754 for (i = 0; i < fcount; i++) { 829 for (i = 0; i < fcount; i++) {
755 switch (new->fields[i].type) { 830 switch (new->fields[i].type) {
756 case AUDIT_SE_USER: 831 case AUDIT_SUBJ_USER:
757 case AUDIT_SE_ROLE: 832 case AUDIT_SUBJ_ROLE:
758 case AUDIT_SE_TYPE: 833 case AUDIT_SUBJ_TYPE:
759 case AUDIT_SE_SEN: 834 case AUDIT_SUBJ_SEN:
760 case AUDIT_SE_CLR: 835 case AUDIT_SUBJ_CLR:
836 case AUDIT_OBJ_USER:
837 case AUDIT_OBJ_ROLE:
838 case AUDIT_OBJ_TYPE:
839 case AUDIT_OBJ_LEV_LOW:
840 case AUDIT_OBJ_LEV_HIGH:
761 err = audit_dupe_selinux_field(&new->fields[i], 841 err = audit_dupe_selinux_field(&new->fields[i],
762 &old->fields[i]); 842 &old->fields[i]);
843 break;
844 case AUDIT_FILTERKEY:
845 fk = kstrdup(old->filterkey, GFP_KERNEL);
846 if (unlikely(!fk))
847 err = -ENOMEM;
848 else
849 new->filterkey = fk;
763 } 850 }
764 if (err) { 851 if (err) {
765 audit_free_rule(entry); 852 audit_free_rule(entry);
@@ -1245,6 +1332,34 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
1245 skb_queue_tail(q, skb); 1332 skb_queue_tail(q, skb);
1246} 1333}
1247 1334
1335/* Log rule additions and removals */
1336static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
1337 struct audit_krule *rule, int res)
1338{
1339 struct audit_buffer *ab;
1340
1341 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
1342 if (!ab)
1343 return;
1344 audit_log_format(ab, "auid=%u", loginuid);
1345 if (sid) {
1346 char *ctx = NULL;
1347 u32 len;
1348 if (selinux_ctxid_to_string(sid, &ctx, &len))
1349 audit_log_format(ab, " ssid=%u", sid);
1350 else
1351 audit_log_format(ab, " subj=%s", ctx);
1352 kfree(ctx);
1353 }
1354 audit_log_format(ab, " %s rule key=", action);
1355 if (rule->filterkey)
1356 audit_log_untrustedstring(ab, rule->filterkey);
1357 else
1358 audit_log_format(ab, "(null)");
1359 audit_log_format(ab, " list=%d res=%d", rule->listnr, res);
1360 audit_log_end(ab);
1361}
1362
1248/** 1363/**
1249 * audit_receive_filter - apply all rules to the specified message type 1364 * audit_receive_filter - apply all rules to the specified message type
1250 * @type: audit message type 1365 * @type: audit message type
@@ -1304,24 +1419,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
1304 1419
1305 err = audit_add_rule(entry, 1420 err = audit_add_rule(entry,
1306 &audit_filter_list[entry->rule.listnr]); 1421 &audit_filter_list[entry->rule.listnr]);
1307 1422 audit_log_rule_change(loginuid, sid, "add", &entry->rule, !err);
1308 if (sid) {
1309 char *ctx = NULL;
1310 u32 len;
1311 if (selinux_ctxid_to_string(sid, &ctx, &len)) {
1312 /* Maybe call audit_panic? */
1313 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1314 "auid=%u ssid=%u add rule to list=%d res=%d",
1315 loginuid, sid, entry->rule.listnr, !err);
1316 } else
1317 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1318 "auid=%u subj=%s add rule to list=%d res=%d",
1319 loginuid, ctx, entry->rule.listnr, !err);
1320 kfree(ctx);
1321 } else
1322 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1323 "auid=%u add rule to list=%d res=%d",
1324 loginuid, entry->rule.listnr, !err);
1325 1423
1326 if (err) 1424 if (err)
1327 audit_free_rule(entry); 1425 audit_free_rule(entry);
@@ -1337,24 +1435,8 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
1337 1435
1338 err = audit_del_rule(entry, 1436 err = audit_del_rule(entry,
1339 &audit_filter_list[entry->rule.listnr]); 1437 &audit_filter_list[entry->rule.listnr]);
1340 1438 audit_log_rule_change(loginuid, sid, "remove", &entry->rule,
1341 if (sid) { 1439 !err);
1342 char *ctx = NULL;
1343 u32 len;
1344 if (selinux_ctxid_to_string(sid, &ctx, &len)) {
1345 /* Maybe call audit_panic? */
1346 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1347 "auid=%u ssid=%u remove rule from list=%d res=%d",
1348 loginuid, sid, entry->rule.listnr, !err);
1349 } else
1350 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1351 "auid=%u subj=%s remove rule from list=%d res=%d",
1352 loginuid, ctx, entry->rule.listnr, !err);
1353 kfree(ctx);
1354 } else
1355 audit_log(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE,
1356 "auid=%u remove rule from list=%d res=%d",
1357 loginuid, entry->rule.listnr, !err);
1358 1440
1359 audit_free_rule(entry); 1441 audit_free_rule(entry);
1360 break; 1442 break;
@@ -1514,11 +1596,16 @@ static inline int audit_rule_has_selinux(struct audit_krule *rule)
1514 for (i = 0; i < rule->field_count; i++) { 1596 for (i = 0; i < rule->field_count; i++) {
1515 struct audit_field *f = &rule->fields[i]; 1597 struct audit_field *f = &rule->fields[i];
1516 switch (f->type) { 1598 switch (f->type) {
1517 case AUDIT_SE_USER: 1599 case AUDIT_SUBJ_USER:
1518 case AUDIT_SE_ROLE: 1600 case AUDIT_SUBJ_ROLE:
1519 case AUDIT_SE_TYPE: 1601 case AUDIT_SUBJ_TYPE:
1520 case AUDIT_SE_SEN: 1602 case AUDIT_SUBJ_SEN:
1521 case AUDIT_SE_CLR: 1603 case AUDIT_SUBJ_CLR:
1604 case AUDIT_OBJ_USER:
1605 case AUDIT_OBJ_ROLE:
1606 case AUDIT_OBJ_TYPE:
1607 case AUDIT_OBJ_LEV_LOW:
1608 case AUDIT_OBJ_LEV_HIGH:
1522 return 1; 1609 return 1;
1523 } 1610 }
1524 } 1611 }
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index dc5e3f01efe7..ae40ac8c39e7 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -186,6 +186,7 @@ struct audit_context {
186 int auditable; /* 1 if record should be written */ 186 int auditable; /* 1 if record should be written */
187 int name_count; 187 int name_count;
188 struct audit_names names[AUDIT_NAMES]; 188 struct audit_names names[AUDIT_NAMES];
189 char * filterkey; /* key for rule that triggered record */
189 struct dentry * pwd; 190 struct dentry * pwd;
190 struct vfsmount * pwdmnt; 191 struct vfsmount * pwdmnt;
191 struct audit_context *previous; /* For nested syscalls */ 192 struct audit_context *previous; /* For nested syscalls */
@@ -320,11 +321,11 @@ static int audit_filter_rules(struct task_struct *tsk,
320 if (ctx) 321 if (ctx)
321 result = audit_comparator(ctx->loginuid, f->op, f->val); 322 result = audit_comparator(ctx->loginuid, f->op, f->val);
322 break; 323 break;
323 case AUDIT_SE_USER: 324 case AUDIT_SUBJ_USER:
324 case AUDIT_SE_ROLE: 325 case AUDIT_SUBJ_ROLE:
325 case AUDIT_SE_TYPE: 326 case AUDIT_SUBJ_TYPE:
326 case AUDIT_SE_SEN: 327 case AUDIT_SUBJ_SEN:
327 case AUDIT_SE_CLR: 328 case AUDIT_SUBJ_CLR:
328 /* NOTE: this may return negative values indicating 329 /* NOTE: this may return negative values indicating
329 a temporary error. We simply treat this as a 330 a temporary error. We simply treat this as a
330 match for now to avoid losing information that 331 match for now to avoid losing information that
@@ -341,6 +342,46 @@ static int audit_filter_rules(struct task_struct *tsk,
341 ctx); 342 ctx);
342 } 343 }
343 break; 344 break;
345 case AUDIT_OBJ_USER:
346 case AUDIT_OBJ_ROLE:
347 case AUDIT_OBJ_TYPE:
348 case AUDIT_OBJ_LEV_LOW:
349 case AUDIT_OBJ_LEV_HIGH:
350 /* The above note for AUDIT_SUBJ_USER...AUDIT_SUBJ_CLR
351 also applies here */
352 if (f->se_rule) {
353 /* Find files that match */
354 if (name) {
355 result = selinux_audit_rule_match(
356 name->osid, f->type, f->op,
357 f->se_rule, ctx);
358 } else if (ctx) {
359 for (j = 0; j < ctx->name_count; j++) {
360 if (selinux_audit_rule_match(
361 ctx->names[j].osid,
362 f->type, f->op,
363 f->se_rule, ctx)) {
364 ++result;
365 break;
366 }
367 }
368 }
369 /* Find ipc objects that match */
370 if (ctx) {
371 struct audit_aux_data *aux;
372 for (aux = ctx->aux; aux;
373 aux = aux->next) {
374 if (aux->type == AUDIT_IPC) {
375 struct audit_aux_data_ipcctl *axi = (void *)aux;
376 if (selinux_audit_rule_match(axi->osid, f->type, f->op, f->se_rule, ctx)) {
377 ++result;
378 break;
379 }
380 }
381 }
382 }
383 }
384 break;
344 case AUDIT_ARG0: 385 case AUDIT_ARG0:
345 case AUDIT_ARG1: 386 case AUDIT_ARG1:
346 case AUDIT_ARG2: 387 case AUDIT_ARG2:
@@ -348,11 +389,17 @@ static int audit_filter_rules(struct task_struct *tsk,
348 if (ctx) 389 if (ctx)
349 result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val); 390 result = audit_comparator(ctx->argv[f->type-AUDIT_ARG0], f->op, f->val);
350 break; 391 break;
392 case AUDIT_FILTERKEY:
393 /* ignore this field for filtering */
394 result = 1;
395 break;
351 } 396 }
352 397
353 if (!result) 398 if (!result)
354 return 0; 399 return 0;
355 } 400 }
401 if (rule->filterkey)
402 ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
356 switch (rule->action) { 403 switch (rule->action) {
357 case AUDIT_NEVER: *state = AUDIT_DISABLED; break; 404 case AUDIT_NEVER: *state = AUDIT_DISABLED; break;
358 case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break; 405 case AUDIT_ALWAYS: *state = AUDIT_RECORD_CONTEXT; break;
@@ -627,6 +674,7 @@ static inline void audit_free_context(struct audit_context *context)
627 } 674 }
628 audit_free_names(context); 675 audit_free_names(context);
629 audit_free_aux(context); 676 audit_free_aux(context);
677 kfree(context->filterkey);
630 kfree(context); 678 kfree(context);
631 context = previous; 679 context = previous;
632 } while (context); 680 } while (context);
@@ -735,6 +783,11 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
735 context->euid, context->suid, context->fsuid, 783 context->euid, context->suid, context->fsuid,
736 context->egid, context->sgid, context->fsgid, tty); 784 context->egid, context->sgid, context->fsgid, tty);
737 audit_log_task_info(ab, tsk); 785 audit_log_task_info(ab, tsk);
786 if (context->filterkey) {
787 audit_log_format(ab, " key=");
788 audit_log_untrustedstring(ab, context->filterkey);
789 } else
790 audit_log_format(ab, " key=(null)");
738 audit_log_end(ab); 791 audit_log_end(ab);
739 792
740 for (aux = context->aux; aux; aux = aux->next) { 793 for (aux = context->aux; aux; aux = aux->next) {
@@ -1060,6 +1113,8 @@ void audit_syscall_exit(int valid, long return_code)
1060 } else { 1113 } else {
1061 audit_free_names(context); 1114 audit_free_names(context);
1062 audit_free_aux(context); 1115 audit_free_aux(context);
1116 kfree(context->filterkey);
1117 context->filterkey = NULL;
1063 tsk->audit_context = context; 1118 tsk->audit_context = context;
1064 } 1119 }
1065} 1120}
diff --git a/kernel/configs.c b/kernel/configs.c
index 009e1ebdcb88..f9e31974f4ad 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -23,7 +23,6 @@
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 */ 24 */
25 25
26#include <linux/config.h>
27#include <linux/kernel.h> 26#include <linux/kernel.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/proc_fs.h> 28#include <linux/proc_fs.h>
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 1535af3a912d..c232dc077438 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -18,7 +18,6 @@
18 * distribution for more details. 18 * distribution for more details.
19 */ 19 */
20 20
21#include <linux/config.h>
22#include <linux/cpu.h> 21#include <linux/cpu.h>
23#include <linux/cpumask.h> 22#include <linux/cpumask.h>
24#include <linux/cpuset.h> 23#include <linux/cpuset.h>
@@ -1064,7 +1063,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
1064} 1063}
1065 1064
1066/* 1065/*
1067 * Frequency meter - How fast is some event occuring? 1066 * Frequency meter - How fast is some event occurring?
1068 * 1067 *
1069 * These routines manage a digitally filtered, constant time based, 1068 * These routines manage a digitally filtered, constant time based,
1070 * event frequency meter. There are four routines: 1069 * event frequency meter. There are four routines:
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index c01cead2cfd6..3c2eaea66b1e 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -7,7 +7,6 @@
7 * 2001-05-06 Complete rewrite, Christoph Hellwig (hch@infradead.org) 7 * 2001-05-06 Complete rewrite, Christoph Hellwig (hch@infradead.org)
8 */ 8 */
9 9
10#include <linux/config.h>
11#include <linux/init.h> 10#include <linux/init.h>
12#include <linux/kernel.h> 11#include <linux/kernel.h>
13#include <linux/kmod.h> 12#include <linux/kmod.h>
diff --git a/kernel/exit.c b/kernel/exit.c
index ab06b9f88f64..7f7ef2258553 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 1991, 1992 Linus Torvalds 4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */ 5 */
6 6
7#include <linux/config.h>
8#include <linux/mm.h> 7#include <linux/mm.h>
9#include <linux/slab.h> 8#include <linux/slab.h>
10#include <linux/interrupt.h> 9#include <linux/interrupt.h>
diff --git a/kernel/fork.c b/kernel/fork.c
index 628198a4f28a..9064bf9e131b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -11,7 +11,6 @@
11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()' 11 * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12 */ 12 */
13 13
14#include <linux/config.h>
15#include <linux/slab.h> 14#include <linux/slab.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/unistd.h> 16#include <linux/unistd.h>
diff --git a/kernel/futex.c b/kernel/futex.c
index 6c91f938005d..15caf93e4a43 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -630,8 +630,10 @@ static int futex_wake(u32 __user *uaddr, int nr_wake)
630 630
631 list_for_each_entry_safe(this, next, head, list) { 631 list_for_each_entry_safe(this, next, head, list) {
632 if (match_futex (&this->key, &key)) { 632 if (match_futex (&this->key, &key)) {
633 if (this->pi_state) 633 if (this->pi_state) {
634 return -EINVAL; 634 ret = -EINVAL;
635 break;
636 }
635 wake_futex(this); 637 wake_futex(this);
636 if (++ret >= nr_wake) 638 if (++ret >= nr_wake)
637 break; 639 break;
@@ -1208,7 +1210,7 @@ static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
1208 } 1210 }
1209 1211
1210 down_read(&curr->mm->mmap_sem); 1212 down_read(&curr->mm->mmap_sem);
1211 hb = queue_lock(&q, -1, NULL); 1213 spin_lock(q.lock_ptr);
1212 1214
1213 /* 1215 /*
1214 * Got the lock. We might not be the anticipated owner if we 1216 * Got the lock. We might not be the anticipated owner if we
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 9f77f50d8143..1dab0ac3f797 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,5 @@
1 1
2obj-y := handle.o manage.o spurious.o 2obj-y := handle.o manage.o spurious.o resend.o chip.o
3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o 3obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 4obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 3467097ca61a..533068cfb607 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -11,12 +11,14 @@
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/delay.h> 12#include <linux/delay.h>
13 13
14#include "internals.h"
15
14/* 16/*
15 * Autodetection depends on the fact that any interrupt that 17 * Autodetection depends on the fact that any interrupt that
16 * comes in on to an unassigned handler will get stuck with 18 * comes in on to an unassigned handler will get stuck with
17 * "IRQ_WAITING" cleared and the interrupt disabled. 19 * "IRQ_WAITING" cleared and the interrupt disabled.
18 */ 20 */
19static DECLARE_MUTEX(probe_sem); 21static DEFINE_MUTEX(probing_active);
20 22
21/** 23/**
22 * probe_irq_on - begin an interrupt autodetect 24 * probe_irq_on - begin an interrupt autodetect
@@ -27,11 +29,11 @@ static DECLARE_MUTEX(probe_sem);
27 */ 29 */
28unsigned long probe_irq_on(void) 30unsigned long probe_irq_on(void)
29{ 31{
30 unsigned long val; 32 struct irq_desc *desc;
31 irq_desc_t *desc; 33 unsigned long mask;
32 unsigned int i; 34 unsigned int i;
33 35
34 down(&probe_sem); 36 mutex_lock(&probing_active);
35 /* 37 /*
36 * something may have generated an irq long ago and we want to 38 * something may have generated an irq long ago and we want to
37 * flush such a longstanding irq before considering it as spurious. 39 * flush such a longstanding irq before considering it as spurious.
@@ -40,8 +42,21 @@ unsigned long probe_irq_on(void)
40 desc = irq_desc + i; 42 desc = irq_desc + i;
41 43
42 spin_lock_irq(&desc->lock); 44 spin_lock_irq(&desc->lock);
43 if (!irq_desc[i].action) 45 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
44 irq_desc[i].handler->startup(i); 46 /*
47 * An old-style architecture might still have
48 * the handle_bad_irq handler there:
49 */
50 compat_irq_chip_set_default_handler(desc);
51
52 /*
53 * Some chips need to know about probing in
54 * progress:
55 */
56 if (desc->chip->set_type)
57 desc->chip->set_type(i, IRQ_TYPE_PROBE);
58 desc->chip->startup(i);
59 }
45 spin_unlock_irq(&desc->lock); 60 spin_unlock_irq(&desc->lock);
46 } 61 }
47 62
@@ -57,9 +72,9 @@ unsigned long probe_irq_on(void)
57 desc = irq_desc + i; 72 desc = irq_desc + i;
58 73
59 spin_lock_irq(&desc->lock); 74 spin_lock_irq(&desc->lock);
60 if (!desc->action) { 75 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
61 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 76 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
62 if (desc->handler->startup(i)) 77 if (desc->chip->startup(i))
63 desc->status |= IRQ_PENDING; 78 desc->status |= IRQ_PENDING;
64 } 79 }
65 spin_unlock_irq(&desc->lock); 80 spin_unlock_irq(&desc->lock);
@@ -73,11 +88,11 @@ unsigned long probe_irq_on(void)
73 /* 88 /*
74 * Now filter out any obviously spurious interrupts 89 * Now filter out any obviously spurious interrupts
75 */ 90 */
76 val = 0; 91 mask = 0;
77 for (i = 0; i < NR_IRQS; i++) { 92 for (i = 0; i < NR_IRQS; i++) {
78 irq_desc_t *desc = irq_desc + i;
79 unsigned int status; 93 unsigned int status;
80 94
95 desc = irq_desc + i;
81 spin_lock_irq(&desc->lock); 96 spin_lock_irq(&desc->lock);
82 status = desc->status; 97 status = desc->status;
83 98
@@ -85,17 +100,16 @@ unsigned long probe_irq_on(void)
85 /* It triggered already - consider it spurious. */ 100 /* It triggered already - consider it spurious. */
86 if (!(status & IRQ_WAITING)) { 101 if (!(status & IRQ_WAITING)) {
87 desc->status = status & ~IRQ_AUTODETECT; 102 desc->status = status & ~IRQ_AUTODETECT;
88 desc->handler->shutdown(i); 103 desc->chip->shutdown(i);
89 } else 104 } else
90 if (i < 32) 105 if (i < 32)
91 val |= 1 << i; 106 mask |= 1 << i;
92 } 107 }
93 spin_unlock_irq(&desc->lock); 108 spin_unlock_irq(&desc->lock);
94 } 109 }
95 110
96 return val; 111 return mask;
97} 112}
98
99EXPORT_SYMBOL(probe_irq_on); 113EXPORT_SYMBOL(probe_irq_on);
100 114
101/** 115/**
@@ -117,7 +131,7 @@ unsigned int probe_irq_mask(unsigned long val)
117 131
118 mask = 0; 132 mask = 0;
119 for (i = 0; i < NR_IRQS; i++) { 133 for (i = 0; i < NR_IRQS; i++) {
120 irq_desc_t *desc = irq_desc + i; 134 struct irq_desc *desc = irq_desc + i;
121 unsigned int status; 135 unsigned int status;
122 136
123 spin_lock_irq(&desc->lock); 137 spin_lock_irq(&desc->lock);
@@ -128,11 +142,11 @@ unsigned int probe_irq_mask(unsigned long val)
128 mask |= 1 << i; 142 mask |= 1 << i;
129 143
130 desc->status = status & ~IRQ_AUTODETECT; 144 desc->status = status & ~IRQ_AUTODETECT;
131 desc->handler->shutdown(i); 145 desc->chip->shutdown(i);
132 } 146 }
133 spin_unlock_irq(&desc->lock); 147 spin_unlock_irq(&desc->lock);
134 } 148 }
135 up(&probe_sem); 149 mutex_unlock(&probing_active);
136 150
137 return mask & val; 151 return mask & val;
138} 152}
@@ -160,7 +174,7 @@ int probe_irq_off(unsigned long val)
160 int i, irq_found = 0, nr_irqs = 0; 174 int i, irq_found = 0, nr_irqs = 0;
161 175
162 for (i = 0; i < NR_IRQS; i++) { 176 for (i = 0; i < NR_IRQS; i++) {
163 irq_desc_t *desc = irq_desc + i; 177 struct irq_desc *desc = irq_desc + i;
164 unsigned int status; 178 unsigned int status;
165 179
166 spin_lock_irq(&desc->lock); 180 spin_lock_irq(&desc->lock);
@@ -173,16 +187,16 @@ int probe_irq_off(unsigned long val)
173 nr_irqs++; 187 nr_irqs++;
174 } 188 }
175 desc->status = status & ~IRQ_AUTODETECT; 189 desc->status = status & ~IRQ_AUTODETECT;
176 desc->handler->shutdown(i); 190 desc->chip->shutdown(i);
177 } 191 }
178 spin_unlock_irq(&desc->lock); 192 spin_unlock_irq(&desc->lock);
179 } 193 }
180 up(&probe_sem); 194 mutex_unlock(&probing_active);
181 195
182 if (nr_irqs > 1) 196 if (nr_irqs > 1)
183 irq_found = -irq_found; 197 irq_found = -irq_found;
198
184 return irq_found; 199 return irq_found;
185} 200}
186
187EXPORT_SYMBOL(probe_irq_off); 201EXPORT_SYMBOL(probe_irq_off);
188 202
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
new file mode 100644
index 000000000000..54105bdfe20d
--- /dev/null
+++ b/kernel/irq/chip.c
@@ -0,0 +1,534 @@
1/*
2 * linux/kernel/irq/chip.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
6 *
7 * This file contains the core interrupt handling code, for irq-chip
8 * based architectures.
9 *
10 * Detailed information is available in Documentation/DocBook/genericirq
11 */
12
13#include <linux/irq.h>
14#include <linux/module.h>
15#include <linux/interrupt.h>
16#include <linux/kernel_stat.h>
17
18#include "internals.h"
19
20/**
21 * set_irq_chip - set the irq chip for an irq
22 * @irq: irq number
23 * @chip: pointer to irq chip description structure
24 */
25int set_irq_chip(unsigned int irq, struct irq_chip *chip)
26{
27 struct irq_desc *desc;
28 unsigned long flags;
29
30 if (irq >= NR_IRQS) {
31 printk(KERN_ERR "Trying to install chip for IRQ%d\n", irq);
32 WARN_ON(1);
33 return -EINVAL;
34 }
35
36 if (!chip)
37 chip = &no_irq_chip;
38
39 desc = irq_desc + irq;
40 spin_lock_irqsave(&desc->lock, flags);
41 irq_chip_set_defaults(chip);
42 desc->chip = chip;
43 /*
44 * For compatibility only:
45 */
46 desc->chip = chip;
47 spin_unlock_irqrestore(&desc->lock, flags);
48
49 return 0;
50}
51EXPORT_SYMBOL(set_irq_chip);
52
/**
 * set_irq_type - set the irq type for an irq
 * @irq: irq number
 * @type: interrupt type - see include/linux/interrupt.h
 *
 * Returns -ENODEV for an out-of-range irq number, -ENXIO when the
 * installed chip has no ->set_type() callback, otherwise the value
 * returned by the chip's ->set_type().
 */
int set_irq_type(unsigned int irq, unsigned int type)
{
	struct irq_desc *desc;
	unsigned long flags;
	int ret = -ENXIO;	/* default: chip cannot set a trigger type */

	if (irq >= NR_IRQS) {
		printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
		return -ENODEV;
	}

	desc = irq_desc + irq;
	/* Only call into the chip if it supports trigger-type setting */
	if (desc->chip->set_type) {
		spin_lock_irqsave(&desc->lock, flags);
		ret = desc->chip->set_type(irq, type);
		spin_unlock_irqrestore(&desc->lock, flags);
	}
	return ret;
}
EXPORT_SYMBOL(set_irq_type);
78
79/**
80 * set_irq_data - set irq type data for an irq
81 * @irq: Interrupt number
82 * @data: Pointer to interrupt specific data
83 *
84 * Set the hardware irq controller data for an irq
85 */
86int set_irq_data(unsigned int irq, void *data)
87{
88 struct irq_desc *desc;
89 unsigned long flags;
90
91 if (irq >= NR_IRQS) {
92 printk(KERN_ERR
93 "Trying to install controller data for IRQ%d\n", irq);
94 return -EINVAL;
95 }
96
97 desc = irq_desc + irq;
98 spin_lock_irqsave(&desc->lock, flags);
99 desc->handler_data = data;
100 spin_unlock_irqrestore(&desc->lock, flags);
101 return 0;
102}
103EXPORT_SYMBOL(set_irq_data);
104
105/**
106 * set_irq_chip_data - set irq chip data for an irq
107 * @irq: Interrupt number
108 * @data: Pointer to chip specific data
109 *
110 * Set the hardware irq chip data for an irq
111 */
112int set_irq_chip_data(unsigned int irq, void *data)
113{
114 struct irq_desc *desc = irq_desc + irq;
115 unsigned long flags;
116
117 if (irq >= NR_IRQS || !desc->chip) {
118 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
119 return -EINVAL;
120 }
121
122 spin_lock_irqsave(&desc->lock, flags);
123 desc->chip_data = data;
124 spin_unlock_irqrestore(&desc->lock, flags);
125
126 return 0;
127}
128EXPORT_SYMBOL(set_irq_chip_data);
129
130/*
131 * default enable function
132 */
133static void default_enable(unsigned int irq)
134{
135 struct irq_desc *desc = irq_desc + irq;
136
137 desc->chip->unmask(irq);
138 desc->status &= ~IRQ_MASKED;
139}
140
141/*
142 * default disable function
143 */
144static void default_disable(unsigned int irq)
145{
146 struct irq_desc *desc = irq_desc + irq;
147
148 if (!(desc->status & IRQ_DELAYED_DISABLE))
149 irq_desc[irq].chip->mask(irq);
150}
151
152/*
153 * default startup function
154 */
155static unsigned int default_startup(unsigned int irq)
156{
157 irq_desc[irq].chip->enable(irq);
158
159 return 0;
160}
161
/*
 * Fixup enable/disable function pointers
 *
 * Fills in a default callback for every hook the chip left NULL.
 * The ordering is load-bearing: ->shutdown defaults to ->disable,
 * so ->disable must already have been defaulted (just above) before
 * it is copied.
 */
void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->disable)
		chip->disable = default_disable;
	if (!chip->startup)
		chip->startup = default_startup;
	/* ->disable is guaranteed non-NULL here; shutdown may alias it */
	if (!chip->shutdown)
		chip->shutdown = chip->disable;
	/* Compatibility: fall back to the legacy ->typename field */
	if (!chip->name)
		chip->name = chip->typename;
}
178
179static inline void mask_ack_irq(struct irq_desc *desc, int irq)
180{
181 if (desc->chip->mask_ack)
182 desc->chip->mask_ack(irq);
183 else {
184 desc->chip->mask(irq);
185 desc->chip->ack(irq);
186 }
187}
188
/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note: The caller is expected to handle the ack, clear, mask and
 * unmask issues if necessary.
 */
void fastcall
handle_simple_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
{
	struct irqaction *action;
	irqreturn_t action_ret;
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	/* Already being handled (e.g. on another CPU): drop the event */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out_unlock;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/* No handler installed or irq disabled: nothing to run */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out_unlock;

	/*
	 * Mark in-progress and drop the lock while the action chain
	 * runs; the IRQ_INPROGRESS check above keeps reentry out.
	 */
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, regs, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret, regs);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
232
/**
 * handle_level_irq - Level type irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require to mask the interrupt and unmask
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void fastcall
handle_level_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);
	/* Mask+ack first: the level stays asserted until the device is serviced */
	mask_ack_irq(desc, irq);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;
	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
		goto out;

	/* Run the action chain unlocked; IRQ_INPROGRESS blocks reentry */
	desc->status |= IRQ_INPROGRESS;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, regs, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret, regs);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	/* Unmask again unless the irq was disabled while being handled */
	if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
		desc->chip->unmask(irq);
	spin_unlock(&desc->lock);
}
281
/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void fastcall
handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc,
		   struct pt_regs *regs)
{
	unsigned int cpu = smp_processor_id();
	struct irqaction *action;
	irqreturn_t action_ret;

	spin_lock(&desc->lock);

	if (unlikely(desc->status & IRQ_INPROGRESS))
		goto out;

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
	kstat_cpu(cpu).irqs[irq]++;

	/*
	 * If it is disabled or no action is available,
	 * keep it masked and get out of here
	 */
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		/* Remember the event so it can be replayed on enable */
		desc->status |= IRQ_PENDING;
		goto out;
	}

	desc->status |= IRQ_INPROGRESS;
	desc->status &= ~IRQ_PENDING;
	spin_unlock(&desc->lock);

	action_ret = handle_IRQ_event(irq, regs, action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret, regs);

	spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
out:
	/* ->eoi() is issued on every path, even when nothing was handled */
	desc->chip->eoi(irq);

	spin_unlock(&desc->lock);
}
334
/**
 * handle_edge_irq - edge type IRQ handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires to reenable the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void fastcall
handle_edge_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
{
	const unsigned int cpu = smp_processor_id();

	spin_lock(&desc->lock);

	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);

	/*
	 * If we're currently running this IRQ, or its disabled,
	 * we shouldn't process the IRQ. Mark it pending, handle
	 * the necessary masking and go out
	 */
	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
		    !desc->action)) {
		desc->status |= (IRQ_PENDING | IRQ_MASKED);
		mask_ack_irq(desc, irq);
		goto out_unlock;
	}

	kstat_cpu(cpu).irqs[irq]++;

	/* Start handling the irq */
	desc->chip->ack(irq);

	/* Mark the IRQ currently in progress.*/
	desc->status |= IRQ_INPROGRESS;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* Action chain was removed while handling: mask and leave */
		if (unlikely(!action)) {
			desc->chip->mask(irq);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely((desc->status &
			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
			desc->chip->unmask(irq);
			desc->status &= ~IRQ_MASKED;
		}

		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, regs, action);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, regs);
		spin_lock(&desc->lock);

		/* Loop while a new edge was latched (PENDING) and not disabled */
	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);

	desc->status &= ~IRQ_INPROGRESS;
out_unlock:
	spin_unlock(&desc->lock);
}
415
416#ifdef CONFIG_SMP
/**
 * handle_percpu_IRQ - Per CPU local irq handler
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Per CPU interrupts on SMP machines without locking requirements
 * (each CPU services its own instance, so no desc->lock is taken).
 */
void fastcall
handle_percpu_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
{
	irqreturn_t action_ret;

	kstat_this_cpu.irqs[irq]++;

	/* ack/eoi are optional for per-cpu chips: call only if provided */
	if (desc->chip->ack)
		desc->chip->ack(irq);

	action_ret = handle_IRQ_event(irq, regs, desc->action);
	if (!noirqdebug)
		note_interrupt(irq, desc, action_ret, regs);

	if (desc->chip->eoi)
		desc->chip->eoi(irq);
}
442
443#endif /* CONFIG_SMP */
444
/*
 * __set_irq_handler - install a highlevel flow handler for an irq
 *
 * A NULL @handle (or handle_bad_irq) uninstalls: the irq is masked,
 * acked and disabled. A chained handler (@is_chained) is enabled
 * immediately and marked NOREQUEST/NOPROBE, since it is owned by a
 * demultiplexing parent rather than by request_irq() users.
 */
void
__set_irq_handler(unsigned int irq,
		  void fastcall (*handle)(unsigned int, irq_desc_t *,
					  struct pt_regs *),
		  int is_chained)
{
	struct irq_desc *desc;
	unsigned long flags;

	if (irq >= NR_IRQS) {
		printk(KERN_ERR
		       "Trying to install type control for IRQ%d\n", irq);
		return;
	}

	desc = irq_desc + irq;

	/* NULL means uninstall: route to the bad-irq handler */
	if (!handle)
		handle = handle_bad_irq;

	if (desc->chip == &no_irq_chip) {
		printk(KERN_WARNING "Trying to install %sinterrupt handler "
		       "for IRQ%d\n", is_chained ? "chained " : " ", irq);
		/*
		 * Some ARM implementations install a handler for really dumb
		 * interrupt hardware without setting an irq_chip. This worked
		 * with the ARM no_irq_chip but the check in setup_irq would
		 * prevent us to setup the interrupt at all. Switch it to
		 * dummy_irq_chip for easy transition.
		 */
		desc->chip = &dummy_irq_chip;
	}

	spin_lock_irqsave(&desc->lock, flags);

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->chip != &no_irq_chip) {
			desc->chip->mask(irq);
			desc->chip->ack(irq);
		}
		/* Leave the line disabled with a depth of 1 */
		desc->status |= IRQ_DISABLED;
		desc->depth = 1;
	}
	desc->handle_irq = handle;

	/* Chained handlers start enabled and are not user-requestable */
	if (handle != handle_bad_irq && is_chained) {
		desc->status &= ~IRQ_DISABLED;
		desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
		desc->depth = 0;
		desc->chip->unmask(irq);
	}
	spin_unlock_irqrestore(&desc->lock, flags);
}
499
/*
 * set_irq_chip_and_handler - convenience wrapper: install @chip for
 * @irq, then install @handle as its (non-chained) flow handler.
 */
void
set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
			 void fastcall (*handle)(unsigned int,
						 struct irq_desc *,
						 struct pt_regs *))
{
	/* Chip must be in place first: __set_irq_handler checks desc->chip */
	set_irq_chip(irq, chip);
	__set_irq_handler(irq, handle, 0);
}
509
/*
 * Get a descriptive string for the highlevel handler, for
 * /proc/interrupts output:
 *
 * Returns NULL for any handler not in the known set below.
 */
const char *
handle_irq_name(void fastcall (*handle)(unsigned int, struct irq_desc *,
					struct pt_regs *))
{
	if (handle == handle_level_irq)
		return "level ";
	if (handle == handle_fasteoi_irq)
		return "fasteoi";
	if (handle == handle_edge_irq)
		return "edge ";
	if (handle == handle_simple_irq)
		return "simple ";
#ifdef CONFIG_SMP
	if (handle == handle_percpu_irq)
		return "percpu ";
#endif
	if (handle == handle_bad_irq)
		return "bad ";

	return NULL;
}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 0f6530117105..aeb6e391276c 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -1,9 +1,13 @@
1/* 1/*
2 * linux/kernel/irq/handle.c 2 * linux/kernel/irq/handle.c
3 * 3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
5 * 6 *
6 * This file contains the core interrupt handling code. 7 * This file contains the core interrupt handling code.
8 *
9 * Detailed information is available in Documentation/DocBook/genericirq
10 *
7 */ 11 */
8 12
9#include <linux/irq.h> 13#include <linux/irq.h>
@@ -14,11 +18,22 @@
14 18
15#include "internals.h" 19#include "internals.h"
16 20
/**
 * handle_bad_irq - handle spurious and unhandled irqs
 * @irq: the interrupt number
 * @desc: the interrupt description structure for this irq
 * @regs: pointer to a register structure
 *
 * Dumps the descriptor state, accounts the event and hands it to the
 * architecture's bad-irq acknowledgment.
 */
void fastcall
handle_bad_irq(unsigned int irq, struct irq_desc *desc, struct pt_regs *regs)
{
	print_irq_desc(irq, desc);
	kstat_this_cpu.irqs[irq]++;
	ack_bad_irq(irq);
}
31
17/* 32/*
18 * Linux has a controller-independent interrupt architecture. 33 * Linux has a controller-independent interrupt architecture.
19 * Every controller has a 'controller-template', that is used 34 * Every controller has a 'controller-template', that is used
20 * by the main code to do the right thing. Each driver-visible 35 * by the main code to do the right thing. Each driver-visible
21 * interrupt source is transparently wired to the apropriate 36 * interrupt source is transparently wired to the appropriate
22 * controller. Thus drivers need not be aware of the 37 * controller. Thus drivers need not be aware of the
23 * interrupt-controller. 38 * interrupt-controller.
24 * 39 *
@@ -28,41 +43,68 @@
28 * 43 *
29 * Controller mappings for all interrupt sources: 44 * Controller mappings for all interrupt sources:
30 */ 45 */
31irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = { 46struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned = {
32 [0 ... NR_IRQS-1] = { 47 [0 ... NR_IRQS-1] = {
33 .status = IRQ_DISABLED, 48 .status = IRQ_DISABLED,
34 .handler = &no_irq_type, 49 .chip = &no_irq_chip,
35 .lock = SPIN_LOCK_UNLOCKED 50 .handle_irq = handle_bad_irq,
51 .depth = 1,
52 .lock = SPIN_LOCK_UNLOCKED,
53#ifdef CONFIG_SMP
54 .affinity = CPU_MASK_ALL
55#endif
36 } 56 }
37}; 57};
38 58
39/* 59/*
40 * Generic 'no controller' code 60 * What should we do if we get a hw irq event on an illegal vector?
61 * Each architecture has to answer this themself.
41 */ 62 */
42static void end_none(unsigned int irq) { } 63static void ack_bad(unsigned int irq)
43static void enable_none(unsigned int irq) { }
44static void disable_none(unsigned int irq) { }
45static void shutdown_none(unsigned int irq) { }
46static unsigned int startup_none(unsigned int irq) { return 0; }
47
48static void ack_none(unsigned int irq)
49{ 64{
50 /* 65 print_irq_desc(irq, irq_desc + irq);
51 * 'what should we do if we get a hw irq event on an illegal vector'.
52 * each architecture has to answer this themself.
53 */
54 ack_bad_irq(irq); 66 ack_bad_irq(irq);
55} 67}
56 68
57struct hw_interrupt_type no_irq_type = { 69/*
58 .typename = "none", 70 * NOP functions
59 .startup = startup_none, 71 */
60 .shutdown = shutdown_none, 72static void noop(unsigned int irq)
61 .enable = enable_none, 73{
62 .disable = disable_none, 74}
63 .ack = ack_none, 75
64 .end = end_none, 76static unsigned int noop_ret(unsigned int irq)
65 .set_affinity = NULL 77{
78 return 0;
79}
80
81/*
82 * Generic no controller implementation
83 */
84struct irq_chip no_irq_chip = {
85 .name = "none",
86 .startup = noop_ret,
87 .shutdown = noop,
88 .enable = noop,
89 .disable = noop,
90 .ack = ack_bad,
91 .end = noop,
92};
93
94/*
95 * Generic dummy implementation which can be used for
96 * real dumb interrupt sources
97 */
98struct irq_chip dummy_irq_chip = {
99 .name = "dummy",
100 .startup = noop_ret,
101 .shutdown = noop,
102 .enable = noop,
103 .disable = noop,
104 .ack = noop,
105 .mask = noop,
106 .unmask = noop,
107 .end = noop,
66}; 108};
67 109
68/* 110/*
@@ -73,16 +115,23 @@ irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
73 return IRQ_NONE; 115 return IRQ_NONE;
74} 116}
75 117
76/* 118/**
77 * Have got an event to handle: 119 * handle_IRQ_event - irq action chain handler
120 * @irq: the interrupt number
121 * @regs: pointer to a register structure
122 * @action: the interrupt action chain for this irq
123 *
124 * Handles the action chain of an irq event
78 */ 125 */
79fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs, 126irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
80 struct irqaction *action) 127 struct irqaction *action)
81{ 128{
82 irqreturn_t ret, retval = IRQ_NONE; 129 irqreturn_t ret, retval = IRQ_NONE;
83 unsigned int status = 0; 130 unsigned int status = 0;
84 131
85 if (!(action->flags & SA_INTERRUPT)) 132 handle_dynamic_tick(action);
133
134 if (!(action->flags & IRQF_DISABLED))
86 local_irq_enable(); 135 local_irq_enable();
87 136
88 do { 137 do {
@@ -93,22 +142,29 @@ fastcall irqreturn_t handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
93 action = action->next; 142 action = action->next;
94 } while (action); 143 } while (action);
95 144
96 if (status & SA_SAMPLE_RANDOM) 145 if (status & IRQF_SAMPLE_RANDOM)
97 add_interrupt_randomness(irq); 146 add_interrupt_randomness(irq);
98 local_irq_disable(); 147 local_irq_disable();
99 148
100 return retval; 149 return retval;
101} 150}
102 151
103/* 152/**
104 * do_IRQ handles all normal device IRQ's (the special 153 * __do_IRQ - original all in one highlevel IRQ handler
154 * @irq: the interrupt number
155 * @regs: pointer to a register structure
156 *
157 * __do_IRQ handles all normal device IRQ's (the special
105 * SMP cross-CPU interrupts have their own specific 158 * SMP cross-CPU interrupts have their own specific
106 * handlers). 159 * handlers).
160 *
161 * This is the original x86 implementation which is used for every
162 * interrupt type.
107 */ 163 */
108fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs) 164fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
109{ 165{
110 irq_desc_t *desc = irq_desc + irq; 166 struct irq_desc *desc = irq_desc + irq;
111 struct irqaction * action; 167 struct irqaction *action;
112 unsigned int status; 168 unsigned int status;
113 169
114 kstat_this_cpu.irqs[irq]++; 170 kstat_this_cpu.irqs[irq]++;
@@ -118,16 +174,16 @@ fastcall unsigned int __do_IRQ(unsigned int irq, struct pt_regs *regs)
118 /* 174 /*
119 * No locking required for CPU-local interrupts: 175 * No locking required for CPU-local interrupts:
120 */ 176 */
121 if (desc->handler->ack) 177 if (desc->chip->ack)
122 desc->handler->ack(irq); 178 desc->chip->ack(irq);
123 action_ret = handle_IRQ_event(irq, regs, desc->action); 179 action_ret = handle_IRQ_event(irq, regs, desc->action);
124 desc->handler->end(irq); 180 desc->chip->end(irq);
125 return 1; 181 return 1;
126 } 182 }
127 183
128 spin_lock(&desc->lock); 184 spin_lock(&desc->lock);
129 if (desc->handler->ack) 185 if (desc->chip->ack)
130 desc->handler->ack(irq); 186 desc->chip->ack(irq);
131 /* 187 /*
132 * REPLAY is when Linux resends an IRQ that was dropped earlier 188 * REPLAY is when Linux resends an IRQ that was dropped earlier
133 * WAITING is used by probe to mark irqs that are being tested 189 * WAITING is used by probe to mark irqs that are being tested
@@ -187,7 +243,7 @@ out:
187 * The ->end() handler has to deal with interrupts which got 243 * The ->end() handler has to deal with interrupts which got
188 * disabled while the handler was running. 244 * disabled while the handler was running.
189 */ 245 */
190 desc->handler->end(irq); 246 desc->chip->end(irq);
191 spin_unlock(&desc->lock); 247 spin_unlock(&desc->lock);
192 248
193 return 1; 249 return 1;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 46feba630266..08a849a22447 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -4,6 +4,12 @@
4 4
5extern int noirqdebug; 5extern int noirqdebug;
6 6
7/* Set default functions for irq_chip structures: */
8extern void irq_chip_set_defaults(struct irq_chip *chip);
9
10/* Set default handler: */
11extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
12
7#ifdef CONFIG_PROC_FS 13#ifdef CONFIG_PROC_FS
8extern void register_irq_proc(unsigned int irq); 14extern void register_irq_proc(unsigned int irq);
9extern void register_handler_proc(unsigned int irq, struct irqaction *action); 15extern void register_handler_proc(unsigned int irq, struct irqaction *action);
@@ -16,3 +22,43 @@ static inline void unregister_handler_proc(unsigned int irq,
16 struct irqaction *action) { } 22 struct irqaction *action) { }
17#endif 23#endif
18 24
25/*
26 * Debugging printout:
27 */
28
29#include <linux/kallsyms.h>
30
/* Print a status flag's name when it is set in desc->status */
#define P(f) if (desc->status & f) printk("%14s set\n", #f)

/*
 * Dump the full state of an irq descriptor to the console: flow
 * handler, chip, action chain and all status flags. Debug aid used
 * by the bad/spurious irq paths.
 */
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq():  %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->chip(): %p, ", desc->chip);
	print_symbol("%s\n", (unsigned long)desc->chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

#undef P
64
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1279e3499534..c911c6ec4dd6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1,12 +1,12 @@
1/* 1/*
2 * linux/kernel/irq/manage.c 2 * linux/kernel/irq/manage.c
3 * 3 *
4 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar 4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
5 * 6 *
6 * This file contains driver APIs to the irq subsystem. 7 * This file contains driver APIs to the irq subsystem.
7 */ 8 */
8 9
9#include <linux/config.h>
10#include <linux/irq.h> 10#include <linux/irq.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/random.h> 12#include <linux/random.h>
@@ -16,12 +16,6 @@
16 16
17#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
18 18
19cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
20
21#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
22cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
23#endif
24
25/** 19/**
26 * synchronize_irq - wait for pending IRQ handlers (on other CPUs) 20 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
27 * @irq: interrupt number to wait for 21 * @irq: interrupt number to wait for
@@ -42,7 +36,6 @@ void synchronize_irq(unsigned int irq)
42 while (desc->status & IRQ_INPROGRESS) 36 while (desc->status & IRQ_INPROGRESS)
43 cpu_relax(); 37 cpu_relax();
44} 38}
45
46EXPORT_SYMBOL(synchronize_irq); 39EXPORT_SYMBOL(synchronize_irq);
47 40
48#endif 41#endif
@@ -60,7 +53,7 @@ EXPORT_SYMBOL(synchronize_irq);
60 */ 53 */
61void disable_irq_nosync(unsigned int irq) 54void disable_irq_nosync(unsigned int irq)
62{ 55{
63 irq_desc_t *desc = irq_desc + irq; 56 struct irq_desc *desc = irq_desc + irq;
64 unsigned long flags; 57 unsigned long flags;
65 58
66 if (irq >= NR_IRQS) 59 if (irq >= NR_IRQS)
@@ -69,11 +62,10 @@ void disable_irq_nosync(unsigned int irq)
69 spin_lock_irqsave(&desc->lock, flags); 62 spin_lock_irqsave(&desc->lock, flags);
70 if (!desc->depth++) { 63 if (!desc->depth++) {
71 desc->status |= IRQ_DISABLED; 64 desc->status |= IRQ_DISABLED;
72 desc->handler->disable(irq); 65 desc->chip->disable(irq);
73 } 66 }
74 spin_unlock_irqrestore(&desc->lock, flags); 67 spin_unlock_irqrestore(&desc->lock, flags);
75} 68}
76
77EXPORT_SYMBOL(disable_irq_nosync); 69EXPORT_SYMBOL(disable_irq_nosync);
78 70
79/** 71/**
@@ -90,7 +82,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
90 */ 82 */
91void disable_irq(unsigned int irq) 83void disable_irq(unsigned int irq)
92{ 84{
93 irq_desc_t *desc = irq_desc + irq; 85 struct irq_desc *desc = irq_desc + irq;
94 86
95 if (irq >= NR_IRQS) 87 if (irq >= NR_IRQS)
96 return; 88 return;
@@ -99,7 +91,6 @@ void disable_irq(unsigned int irq)
99 if (desc->action) 91 if (desc->action)
100 synchronize_irq(irq); 92 synchronize_irq(irq);
101} 93}
102
103EXPORT_SYMBOL(disable_irq); 94EXPORT_SYMBOL(disable_irq);
104 95
105/** 96/**
@@ -114,7 +105,7 @@ EXPORT_SYMBOL(disable_irq);
114 */ 105 */
115void enable_irq(unsigned int irq) 106void enable_irq(unsigned int irq)
116{ 107{
117 irq_desc_t *desc = irq_desc + irq; 108 struct irq_desc *desc = irq_desc + irq;
118 unsigned long flags; 109 unsigned long flags;
119 110
120 if (irq >= NR_IRQS) 111 if (irq >= NR_IRQS)
@@ -123,17 +114,15 @@ void enable_irq(unsigned int irq)
123 spin_lock_irqsave(&desc->lock, flags); 114 spin_lock_irqsave(&desc->lock, flags);
124 switch (desc->depth) { 115 switch (desc->depth) {
125 case 0: 116 case 0:
117 printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
126 WARN_ON(1); 118 WARN_ON(1);
127 break; 119 break;
128 case 1: { 120 case 1: {
129 unsigned int status = desc->status & ~IRQ_DISABLED; 121 unsigned int status = desc->status & ~IRQ_DISABLED;
130 122
131 desc->status = status; 123 /* Prevent probing on this irq: */
132 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) { 124 desc->status = status | IRQ_NOPROBE;
133 desc->status = status | IRQ_REPLAY; 125 check_irq_resend(desc, irq);
134 hw_resend_irq(desc->handler,irq);
135 }
136 desc->handler->enable(irq);
137 /* fall-through */ 126 /* fall-through */
138 } 127 }
139 default: 128 default:
@@ -141,9 +130,29 @@ void enable_irq(unsigned int irq)
141 } 130 }
142 spin_unlock_irqrestore(&desc->lock, flags); 131 spin_unlock_irqrestore(&desc->lock, flags);
143} 132}
144
145EXPORT_SYMBOL(enable_irq); 133EXPORT_SYMBOL(enable_irq);
146 134
/**
 * set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode
 *
 * Returns -ENXIO when the chip provides no ->set_wake() callback,
 * otherwise the callback's return value.
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_desc + irq;
	unsigned long flags;
	int ret = -ENXIO;

	/*
	 * NOTE(review): unlike the other irq setters in this series
	 * there is no irq >= NR_IRQS bounds check here -- confirm all
	 * callers validate the irq number first.
	 */
	spin_lock_irqsave(&desc->lock, flags);
	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);
	spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
155
147/* 156/*
148 * Internal function that tells the architecture code whether a 157 * Internal function that tells the architecture code whether a
149 * particular irq has been exclusively allocated or is available 158 * particular irq has been exclusively allocated or is available
@@ -153,22 +162,33 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
153{ 162{
154 struct irqaction *action; 163 struct irqaction *action;
155 164
156 if (irq >= NR_IRQS) 165 if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
157 return 0; 166 return 0;
158 167
159 action = irq_desc[irq].action; 168 action = irq_desc[irq].action;
160 if (action) 169 if (action)
161 if (irqflags & action->flags & SA_SHIRQ) 170 if (irqflags & action->flags & IRQF_SHARED)
162 action = NULL; 171 action = NULL;
163 172
164 return !action; 173 return !action;
165} 174}
166 175
/* Clear the placeholder flow handler so a missing setup is caught later */
void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}
186
167/* 187/*
168 * Internal function to register an irqaction - typically used to 188 * Internal function to register an irqaction - typically used to
169 * allocate special interrupts that are part of the architecture. 189 * allocate special interrupts that are part of the architecture.
170 */ 190 */
171int setup_irq(unsigned int irq, struct irqaction * new) 191int setup_irq(unsigned int irq, struct irqaction *new)
172{ 192{
173 struct irq_desc *desc = irq_desc + irq; 193 struct irq_desc *desc = irq_desc + irq;
174 struct irqaction *old, **p; 194 struct irqaction *old, **p;
@@ -178,14 +198,14 @@ int setup_irq(unsigned int irq, struct irqaction * new)
178 if (irq >= NR_IRQS) 198 if (irq >= NR_IRQS)
179 return -EINVAL; 199 return -EINVAL;
180 200
181 if (desc->handler == &no_irq_type) 201 if (desc->chip == &no_irq_chip)
182 return -ENOSYS; 202 return -ENOSYS;
183 /* 203 /*
184 * Some drivers like serial.c use request_irq() heavily, 204 * Some drivers like serial.c use request_irq() heavily,
185 * so we have to be careful not to interfere with a 205 * so we have to be careful not to interfere with a
186 * running system. 206 * running system.
187 */ 207 */
188 if (new->flags & SA_SAMPLE_RANDOM) { 208 if (new->flags & IRQF_SAMPLE_RANDOM) {
189 /* 209 /*
190 * This function might sleep, we want to call it first, 210 * This function might sleep, we want to call it first,
191 * outside of the atomic block. 211 * outside of the atomic block.
@@ -200,16 +220,24 @@ int setup_irq(unsigned int irq, struct irqaction * new)
200 /* 220 /*
201 * The following block of code has to be executed atomically 221 * The following block of code has to be executed atomically
202 */ 222 */
203 spin_lock_irqsave(&desc->lock,flags); 223 spin_lock_irqsave(&desc->lock, flags);
204 p = &desc->action; 224 p = &desc->action;
205 if ((old = *p) != NULL) { 225 old = *p;
206 /* Can't share interrupts unless both agree to */ 226 if (old) {
207 if (!(old->flags & new->flags & SA_SHIRQ)) 227 /*
228 * Can't share interrupts unless both agree to and are
229 * the same type (level, edge, polarity). So both flag
230 * fields must have IRQF_SHARED set and the bits which
231 * set the trigger type must match.
232 */
233 if (!((old->flags & new->flags) & IRQF_SHARED) ||
234 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK))
208 goto mismatch; 235 goto mismatch;
209 236
210#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ) 237#if defined(CONFIG_IRQ_PER_CPU)
211 /* All handlers must agree on per-cpuness */ 238 /* All handlers must agree on per-cpuness */
212 if ((old->flags & IRQ_PER_CPU) != (new->flags & IRQ_PER_CPU)) 239 if ((old->flags & IRQF_PERCPU) !=
240 (new->flags & IRQF_PERCPU))
213 goto mismatch; 241 goto mismatch;
214#endif 242#endif
215 243
@@ -222,20 +250,45 @@ int setup_irq(unsigned int irq, struct irqaction * new)
222 } 250 }
223 251
224 *p = new; 252 *p = new;
225#if defined(ARCH_HAS_IRQ_PER_CPU) && defined(SA_PERCPU_IRQ) 253#if defined(CONFIG_IRQ_PER_CPU)
226 if (new->flags & SA_PERCPU_IRQ) 254 if (new->flags & IRQF_PERCPU)
227 desc->status |= IRQ_PER_CPU; 255 desc->status |= IRQ_PER_CPU;
228#endif 256#endif
229 if (!shared) { 257 if (!shared) {
230 desc->depth = 0; 258 irq_chip_set_defaults(desc->chip);
231 desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | 259
232 IRQ_WAITING | IRQ_INPROGRESS); 260 /* Setup the type (level, edge polarity) if configured: */
233 if (desc->handler->startup) 261 if (new->flags & IRQF_TRIGGER_MASK) {
234 desc->handler->startup(irq); 262 if (desc->chip && desc->chip->set_type)
235 else 263 desc->chip->set_type(irq,
236 desc->handler->enable(irq); 264 new->flags & IRQF_TRIGGER_MASK);
265 else
266 /*
267 * IRQF_TRIGGER_* but the PIC does not support
268 * multiple flow-types?
269 */
270 printk(KERN_WARNING "No IRQF_TRIGGER set_type "
271 "function for IRQ %d (%s)\n", irq,
272 desc->chip ? desc->chip->name :
273 "unknown");
274 } else
275 compat_irq_chip_set_default_handler(desc);
276
277 desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
278 IRQ_INPROGRESS);
279
280 if (!(desc->status & IRQ_NOAUTOEN)) {
281 desc->depth = 0;
282 desc->status &= ~IRQ_DISABLED;
283 if (desc->chip->startup)
284 desc->chip->startup(irq);
285 else
286 desc->chip->enable(irq);
287 } else
288 /* Undo nested disables: */
289 desc->depth = 1;
237 } 290 }
238 spin_unlock_irqrestore(&desc->lock,flags); 291 spin_unlock_irqrestore(&desc->lock, flags);
239 292
240 new->irq = irq; 293 new->irq = irq;
241 register_irq_proc(irq); 294 register_irq_proc(irq);
@@ -246,8 +299,8 @@ int setup_irq(unsigned int irq, struct irqaction * new)
246 299
247mismatch: 300mismatch:
248 spin_unlock_irqrestore(&desc->lock, flags); 301 spin_unlock_irqrestore(&desc->lock, flags);
249 if (!(new->flags & SA_PROBEIRQ)) { 302 if (!(new->flags & IRQF_PROBE_SHARED)) {
250 printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__); 303 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
251 dump_stack(); 304 dump_stack();
252 } 305 }
253 return -EBUSY; 306 return -EBUSY;
@@ -278,10 +331,10 @@ void free_irq(unsigned int irq, void *dev_id)
278 return; 331 return;
279 332
280 desc = irq_desc + irq; 333 desc = irq_desc + irq;
281 spin_lock_irqsave(&desc->lock,flags); 334 spin_lock_irqsave(&desc->lock, flags);
282 p = &desc->action; 335 p = &desc->action;
283 for (;;) { 336 for (;;) {
284 struct irqaction * action = *p; 337 struct irqaction *action = *p;
285 338
286 if (action) { 339 if (action) {
287 struct irqaction **pp = p; 340 struct irqaction **pp = p;
@@ -295,18 +348,18 @@ void free_irq(unsigned int irq, void *dev_id)
295 348
296 /* Currently used only by UML, might disappear one day.*/ 349 /* Currently used only by UML, might disappear one day.*/
297#ifdef CONFIG_IRQ_RELEASE_METHOD 350#ifdef CONFIG_IRQ_RELEASE_METHOD
298 if (desc->handler->release) 351 if (desc->chip->release)
299 desc->handler->release(irq, dev_id); 352 desc->chip->release(irq, dev_id);
300#endif 353#endif
301 354
302 if (!desc->action) { 355 if (!desc->action) {
303 desc->status |= IRQ_DISABLED; 356 desc->status |= IRQ_DISABLED;
304 if (desc->handler->shutdown) 357 if (desc->chip->shutdown)
305 desc->handler->shutdown(irq); 358 desc->chip->shutdown(irq);
306 else 359 else
307 desc->handler->disable(irq); 360 desc->chip->disable(irq);
308 } 361 }
309 spin_unlock_irqrestore(&desc->lock,flags); 362 spin_unlock_irqrestore(&desc->lock, flags);
310 unregister_handler_proc(irq, action); 363 unregister_handler_proc(irq, action);
311 364
312 /* Make sure it's not being used on another CPU */ 365 /* Make sure it's not being used on another CPU */
@@ -314,12 +367,11 @@ void free_irq(unsigned int irq, void *dev_id)
314 kfree(action); 367 kfree(action);
315 return; 368 return;
316 } 369 }
317 printk(KERN_ERR "Trying to free free IRQ%d\n",irq); 370 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
318 spin_unlock_irqrestore(&desc->lock,flags); 371 spin_unlock_irqrestore(&desc->lock, flags);
319 return; 372 return;
320 } 373 }
321} 374}
322
323EXPORT_SYMBOL(free_irq); 375EXPORT_SYMBOL(free_irq);
324 376
325/** 377/**
@@ -346,16 +398,16 @@ EXPORT_SYMBOL(free_irq);
346 * 398 *
347 * Flags: 399 * Flags:
348 * 400 *
349 * SA_SHIRQ Interrupt is shared 401 * IRQF_SHARED Interrupt is shared
350 * SA_INTERRUPT Disable local interrupts while processing 402 * IRQF_DISABLED Disable local interrupts while processing
351 * SA_SAMPLE_RANDOM The interrupt can be used for entropy 403 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
352 * 404 *
353 */ 405 */
354int request_irq(unsigned int irq, 406int request_irq(unsigned int irq,
355 irqreturn_t (*handler)(int, void *, struct pt_regs *), 407 irqreturn_t (*handler)(int, void *, struct pt_regs *),
356 unsigned long irqflags, const char * devname, void *dev_id) 408 unsigned long irqflags, const char *devname, void *dev_id)
357{ 409{
358 struct irqaction * action; 410 struct irqaction *action;
359 int retval; 411 int retval;
360 412
361 /* 413 /*
@@ -364,10 +416,12 @@ int request_irq(unsigned int irq,
364 * which interrupt is which (messes up the interrupt freeing 416 * which interrupt is which (messes up the interrupt freeing
365 * logic etc). 417 * logic etc).
366 */ 418 */
367 if ((irqflags & SA_SHIRQ) && !dev_id) 419 if ((irqflags & IRQF_SHARED) && !dev_id)
368 return -EINVAL; 420 return -EINVAL;
369 if (irq >= NR_IRQS) 421 if (irq >= NR_IRQS)
370 return -EINVAL; 422 return -EINVAL;
423 if (irq_desc[irq].status & IRQ_NOREQUEST)
424 return -EINVAL;
371 if (!handler) 425 if (!handler)
372 return -EINVAL; 426 return -EINVAL;
373 427
@@ -390,6 +444,5 @@ int request_irq(unsigned int irq,
390 444
391 return retval; 445 return retval;
392} 446}
393
394EXPORT_SYMBOL(request_irq); 447EXPORT_SYMBOL(request_irq);
395 448
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index a12d00eb5e7c..a57ebe9fa6f6 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -3,19 +3,19 @@
3 3
4void set_pending_irq(unsigned int irq, cpumask_t mask) 4void set_pending_irq(unsigned int irq, cpumask_t mask)
5{ 5{
6 irq_desc_t *desc = irq_desc + irq; 6 struct irq_desc *desc = irq_desc + irq;
7 unsigned long flags; 7 unsigned long flags;
8 8
9 spin_lock_irqsave(&desc->lock, flags); 9 spin_lock_irqsave(&desc->lock, flags);
10 desc->move_irq = 1; 10 desc->move_irq = 1;
11 pending_irq_cpumask[irq] = mask; 11 irq_desc[irq].pending_mask = mask;
12 spin_unlock_irqrestore(&desc->lock, flags); 12 spin_unlock_irqrestore(&desc->lock, flags);
13} 13}
14 14
15void move_native_irq(int irq) 15void move_native_irq(int irq)
16{ 16{
17 struct irq_desc *desc = irq_desc + irq;
17 cpumask_t tmp; 18 cpumask_t tmp;
18 irq_desc_t *desc = irq_descp(irq);
19 19
20 if (likely(!desc->move_irq)) 20 if (likely(!desc->move_irq))
21 return; 21 return;
@@ -30,15 +30,15 @@ void move_native_irq(int irq)
30 30
31 desc->move_irq = 0; 31 desc->move_irq = 0;
32 32
33 if (unlikely(cpus_empty(pending_irq_cpumask[irq]))) 33 if (unlikely(cpus_empty(irq_desc[irq].pending_mask)))
34 return; 34 return;
35 35
36 if (!desc->handler->set_affinity) 36 if (!desc->chip->set_affinity)
37 return; 37 return;
38 38
39 assert_spin_locked(&desc->lock); 39 assert_spin_locked(&desc->lock);
40 40
41 cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map); 41 cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map);
42 42
43 /* 43 /*
44 * If there was a valid mask to work with, please 44 * If there was a valid mask to work with, please
@@ -51,12 +51,12 @@ void move_native_irq(int irq)
51 */ 51 */
52 if (likely(!cpus_empty(tmp))) { 52 if (likely(!cpus_empty(tmp))) {
53 if (likely(!(desc->status & IRQ_DISABLED))) 53 if (likely(!(desc->status & IRQ_DISABLED)))
54 desc->handler->disable(irq); 54 desc->chip->disable(irq);
55 55
56 desc->handler->set_affinity(irq,tmp); 56 desc->chip->set_affinity(irq,tmp);
57 57
58 if (likely(!(desc->status & IRQ_DISABLED))) 58 if (likely(!(desc->status & IRQ_DISABLED)))
59 desc->handler->enable(irq); 59 desc->chip->enable(irq);
60 } 60 }
61 cpus_clear(pending_irq_cpumask[irq]); 61 cpus_clear(irq_desc[irq].pending_mask);
62} 62}
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index afacd6f585fa..607c7809ad01 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,15 +12,10 @@
12 12
13#include "internals.h" 13#include "internals.h"
14 14
15static struct proc_dir_entry *root_irq_dir, *irq_dir[NR_IRQS]; 15static struct proc_dir_entry *root_irq_dir;
16 16
17#ifdef CONFIG_SMP 17#ifdef CONFIG_SMP
18 18
19/*
20 * The /proc/irq/<irq>/smp_affinity values:
21 */
22static struct proc_dir_entry *smp_affinity_entry[NR_IRQS];
23
24#ifdef CONFIG_GENERIC_PENDING_IRQ 19#ifdef CONFIG_GENERIC_PENDING_IRQ
25void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) 20void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
26{ 21{
@@ -36,15 +31,15 @@ void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
36void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val) 31void proc_set_irq_affinity(unsigned int irq, cpumask_t mask_val)
37{ 32{
38 set_balance_irq_affinity(irq, mask_val); 33 set_balance_irq_affinity(irq, mask_val);
39 irq_affinity[irq] = mask_val; 34 irq_desc[irq].affinity = mask_val;
40 irq_desc[irq].handler->set_affinity(irq, mask_val); 35 irq_desc[irq].chip->set_affinity(irq, mask_val);
41} 36}
42#endif 37#endif
43 38
44static int irq_affinity_read_proc(char *page, char **start, off_t off, 39static int irq_affinity_read_proc(char *page, char **start, off_t off,
45 int count, int *eof, void *data) 40 int count, int *eof, void *data)
46{ 41{
47 int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]); 42 int len = cpumask_scnprintf(page, count, irq_desc[(long)data].affinity);
48 43
49 if (count - len < 2) 44 if (count - len < 2)
50 return -EINVAL; 45 return -EINVAL;
@@ -59,7 +54,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer,
59 unsigned int irq = (int)(long)data, full_count = count, err; 54 unsigned int irq = (int)(long)data, full_count = count, err;
60 cpumask_t new_value, tmp; 55 cpumask_t new_value, tmp;
61 56
62 if (!irq_desc[irq].handler->set_affinity || no_irq_affinity) 57 if (!irq_desc[irq].chip->set_affinity || no_irq_affinity)
63 return -EIO; 58 return -EIO;
64 59
65 err = cpumask_parse(buffer, count, new_value); 60 err = cpumask_parse(buffer, count, new_value);
@@ -102,7 +97,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
102{ 97{
103 char name [MAX_NAMELEN]; 98 char name [MAX_NAMELEN];
104 99
105 if (!irq_dir[irq] || action->dir || !action->name || 100 if (!irq_desc[irq].dir || action->dir || !action->name ||
106 !name_unique(irq, action)) 101 !name_unique(irq, action))
107 return; 102 return;
108 103
@@ -110,7 +105,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
110 snprintf(name, MAX_NAMELEN, "%s", action->name); 105 snprintf(name, MAX_NAMELEN, "%s", action->name);
111 106
112 /* create /proc/irq/1234/handler/ */ 107 /* create /proc/irq/1234/handler/ */
113 action->dir = proc_mkdir(name, irq_dir[irq]); 108 action->dir = proc_mkdir(name, irq_desc[irq].dir);
114} 109}
115 110
116#undef MAX_NAMELEN 111#undef MAX_NAMELEN
@@ -122,22 +117,22 @@ void register_irq_proc(unsigned int irq)
122 char name [MAX_NAMELEN]; 117 char name [MAX_NAMELEN];
123 118
124 if (!root_irq_dir || 119 if (!root_irq_dir ||
125 (irq_desc[irq].handler == &no_irq_type) || 120 (irq_desc[irq].chip == &no_irq_chip) ||
126 irq_dir[irq]) 121 irq_desc[irq].dir)
127 return; 122 return;
128 123
129 memset(name, 0, MAX_NAMELEN); 124 memset(name, 0, MAX_NAMELEN);
130 sprintf(name, "%d", irq); 125 sprintf(name, "%d", irq);
131 126
132 /* create /proc/irq/1234 */ 127 /* create /proc/irq/1234 */
133 irq_dir[irq] = proc_mkdir(name, root_irq_dir); 128 irq_desc[irq].dir = proc_mkdir(name, root_irq_dir);
134 129
135#ifdef CONFIG_SMP 130#ifdef CONFIG_SMP
136 { 131 {
137 struct proc_dir_entry *entry; 132 struct proc_dir_entry *entry;
138 133
139 /* create /proc/irq/<irq>/smp_affinity */ 134 /* create /proc/irq/<irq>/smp_affinity */
140 entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]); 135 entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
141 136
142 if (entry) { 137 if (entry) {
143 entry->nlink = 1; 138 entry->nlink = 1;
@@ -145,7 +140,6 @@ void register_irq_proc(unsigned int irq)
145 entry->read_proc = irq_affinity_read_proc; 140 entry->read_proc = irq_affinity_read_proc;
146 entry->write_proc = irq_affinity_write_proc; 141 entry->write_proc = irq_affinity_write_proc;
147 } 142 }
148 smp_affinity_entry[irq] = entry;
149 } 143 }
150#endif 144#endif
151} 145}
@@ -155,7 +149,7 @@ void register_irq_proc(unsigned int irq)
155void unregister_handler_proc(unsigned int irq, struct irqaction *action) 149void unregister_handler_proc(unsigned int irq, struct irqaction *action)
156{ 150{
157 if (action->dir) 151 if (action->dir)
158 remove_proc_entry(action->dir->name, irq_dir[irq]); 152 remove_proc_entry(action->dir->name, irq_desc[irq].dir);
159} 153}
160 154
161void init_irq_proc(void) 155void init_irq_proc(void)
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
new file mode 100644
index 000000000000..872f91ba2ce8
--- /dev/null
+++ b/kernel/irq/resend.c
@@ -0,0 +1,78 @@
1/*
2 * linux/kernel/irq/resend.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006, Thomas Gleixner
6 *
7 * This file contains the IRQ-resend code
8 *
9 * If the interrupt is waiting to be processed, we try to re-run it.
10 * We can't directly run it from here since the caller might be in an
11 * interrupt-protected region. Not all irq controller chips can
12 * retrigger interrupts at the hardware level, so in those cases
13 * we allow the resending of IRQs via a tasklet.
14 */
15
16#include <linux/irq.h>
17#include <linux/module.h>
18#include <linux/random.h>
19#include <linux/interrupt.h>
20
21#include "internals.h"
22
23#ifdef CONFIG_HARDIRQS_SW_RESEND
24
25/* Bitmap to handle software resend of interrupts: */
26static DECLARE_BITMAP(irqs_resend, NR_IRQS);
27
28/*
29 * Run software resends of IRQ's
30 */
31static void resend_irqs(unsigned long arg)
32{
33 struct irq_desc *desc;
34 int irq;
35
36 while (!bitmap_empty(irqs_resend, NR_IRQS)) {
37 irq = find_first_bit(irqs_resend, NR_IRQS);
38 clear_bit(irq, irqs_resend);
39 desc = irq_desc + irq;
40 local_irq_disable();
41 desc->handle_irq(irq, desc, NULL);
42 local_irq_enable();
43 }
44}
45
46/* Tasklet to handle resend: */
47static DECLARE_TASKLET(resend_tasklet, resend_irqs, 0);
48
49#endif
50
51/*
52 * IRQ resend
53 *
54 * Is called with interrupts disabled and desc->lock held.
55 */
56void check_irq_resend(struct irq_desc *desc, unsigned int irq)
57{
58 unsigned int status = desc->status;
59
60 /*
61 * Make sure the interrupt is enabled, before resending it:
62 */
63 desc->chip->enable(irq);
64
65 if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
66 desc->status &= ~IRQ_PENDING;
67 desc->status = status | IRQ_REPLAY;
68
69 if (!desc->chip || !desc->chip->retrigger ||
70 !desc->chip->retrigger(irq)) {
71#ifdef CONFIG_HARDIRQS_SW_RESEND
72 /* Set it pending and activate the softirq: */
73 set_bit(irq, irqs_resend);
74 tasklet_schedule(&resend_tasklet);
75#endif
76 }
77 }
78}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index b2fb3c18d06b..417e98092cf2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -16,39 +16,39 @@ static int irqfixup __read_mostly;
16/* 16/*
17 * Recovery handler for misrouted interrupts. 17 * Recovery handler for misrouted interrupts.
18 */ 18 */
19
20static int misrouted_irq(int irq, struct pt_regs *regs) 19static int misrouted_irq(int irq, struct pt_regs *regs)
21{ 20{
22 int i; 21 int i;
23 irq_desc_t *desc;
24 int ok = 0; 22 int ok = 0;
25 int work = 0; /* Did we do work for a real IRQ */ 23 int work = 0; /* Did we do work for a real IRQ */
26 24
27 for(i = 1; i < NR_IRQS; i++) { 25 for (i = 1; i < NR_IRQS; i++) {
26 struct irq_desc *desc = irq_desc + i;
28 struct irqaction *action; 27 struct irqaction *action;
29 28
30 if (i == irq) /* Already tried */ 29 if (i == irq) /* Already tried */
31 continue; 30 continue;
32 desc = &irq_desc[i]; 31
33 spin_lock(&desc->lock); 32 spin_lock(&desc->lock);
34 action = desc->action;
35 /* Already running on another processor */ 33 /* Already running on another processor */
36 if (desc->status & IRQ_INPROGRESS) { 34 if (desc->status & IRQ_INPROGRESS) {
37 /* 35 /*
38 * Already running: If it is shared get the other 36 * Already running: If it is shared get the other
39 * CPU to go looking for our mystery interrupt too 37 * CPU to go looking for our mystery interrupt too
40 */ 38 */
41 if (desc->action && (desc->action->flags & SA_SHIRQ)) 39 if (desc->action && (desc->action->flags & IRQF_SHARED))
42 desc->status |= IRQ_PENDING; 40 desc->status |= IRQ_PENDING;
43 spin_unlock(&desc->lock); 41 spin_unlock(&desc->lock);
44 continue; 42 continue;
45 } 43 }
46 /* Honour the normal IRQ locking */ 44 /* Honour the normal IRQ locking */
47 desc->status |= IRQ_INPROGRESS; 45 desc->status |= IRQ_INPROGRESS;
46 action = desc->action;
48 spin_unlock(&desc->lock); 47 spin_unlock(&desc->lock);
48
49 while (action) { 49 while (action) {
50 /* Only shared IRQ handlers are safe to call */ 50 /* Only shared IRQ handlers are safe to call */
51 if (action->flags & SA_SHIRQ) { 51 if (action->flags & IRQF_SHARED) {
52 if (action->handler(i, action->dev_id, regs) == 52 if (action->handler(i, action->dev_id, regs) ==
53 IRQ_HANDLED) 53 IRQ_HANDLED)
54 ok = 1; 54 ok = 1;
@@ -62,9 +62,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
62 62
63 /* 63 /*
64 * While we were looking for a fixup someone queued a real 64 * While we were looking for a fixup someone queued a real
65 * IRQ clashing with our walk 65 * IRQ clashing with our walk:
66 */ 66 */
67
68 while ((desc->status & IRQ_PENDING) && action) { 67 while ((desc->status & IRQ_PENDING) && action) {
69 /* 68 /*
70 * Perform real IRQ processing for the IRQ we deferred 69 * Perform real IRQ processing for the IRQ we deferred
@@ -80,8 +79,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
80 * If we did actual work for the real IRQ line we must let the 79 * If we did actual work for the real IRQ line we must let the
81 * IRQ controller clean up too 80 * IRQ controller clean up too
82 */ 81 */
83 if(work) 82 if (work && desc->chip && desc->chip->end)
84 desc->handler->end(i); 83 desc->chip->end(i);
85 spin_unlock(&desc->lock); 84 spin_unlock(&desc->lock);
86 } 85 }
87 /* So the caller can adjust the irq error counts */ 86 /* So the caller can adjust the irq error counts */
@@ -100,7 +99,8 @@ static int misrouted_irq(int irq, struct pt_regs *regs)
100 */ 99 */
101 100
102static void 101static void
103__report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) 102__report_bad_irq(unsigned int irq, struct irq_desc *desc,
103 irqreturn_t action_ret)
104{ 104{
105 struct irqaction *action; 105 struct irqaction *action;
106 106
@@ -113,6 +113,7 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
113 } 113 }
114 dump_stack(); 114 dump_stack();
115 printk(KERN_ERR "handlers:\n"); 115 printk(KERN_ERR "handlers:\n");
116
116 action = desc->action; 117 action = desc->action;
117 while (action) { 118 while (action) {
118 printk(KERN_ERR "[<%p>]", action->handler); 119 printk(KERN_ERR "[<%p>]", action->handler);
@@ -123,7 +124,8 @@ __report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret)
123 } 124 }
124} 125}
125 126
126static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret) 127static void
128report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
127{ 129{
128 static int count = 100; 130 static int count = 100;
129 131
@@ -133,8 +135,8 @@ static void report_bad_irq(unsigned int irq, irq_desc_t *desc, irqreturn_t actio
133 } 135 }
134} 136}
135 137
136void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret, 138void note_interrupt(unsigned int irq, struct irq_desc *desc,
137 struct pt_regs *regs) 139 irqreturn_t action_ret, struct pt_regs *regs)
138{ 140{
139 if (unlikely(action_ret != IRQ_HANDLED)) { 141 if (unlikely(action_ret != IRQ_HANDLED)) {
140 desc->irqs_unhandled++; 142 desc->irqs_unhandled++;
@@ -166,7 +168,8 @@ void note_interrupt(unsigned int irq, irq_desc_t *desc, irqreturn_t action_ret,
166 */ 168 */
167 printk(KERN_EMERG "Disabling IRQ #%d\n", irq); 169 printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
168 desc->status |= IRQ_DISABLED; 170 desc->status |= IRQ_DISABLED;
169 desc->handler->disable(irq); 171 desc->depth = 1;
172 desc->chip->disable(irq);
170 } 173 }
171 desc->irqs_unhandled = 0; 174 desc->irqs_unhandled = 0;
172} 175}
@@ -177,6 +180,7 @@ int __init noirqdebug_setup(char *str)
177{ 180{
178 noirqdebug = 1; 181 noirqdebug = 1;
179 printk(KERN_INFO "IRQ lockup detection disabled\n"); 182 printk(KERN_INFO "IRQ lockup detection disabled\n");
183
180 return 1; 184 return 1;
181} 185}
182 186
@@ -187,6 +191,7 @@ static int __init irqfixup_setup(char *str)
187 irqfixup = 1; 191 irqfixup = 1;
188 printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); 192 printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
189 printk(KERN_WARNING "This may impact system performance.\n"); 193 printk(KERN_WARNING "This may impact system performance.\n");
194
190 return 1; 195 return 1;
191} 196}
192 197
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 58f0f382597c..50087ecf337e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1042,7 +1042,6 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
1042 1042
1043void crash_kexec(struct pt_regs *regs) 1043void crash_kexec(struct pt_regs *regs)
1044{ 1044{
1045 struct kimage *image;
1046 int locked; 1045 int locked;
1047 1046
1048 1047
@@ -1056,12 +1055,11 @@ void crash_kexec(struct pt_regs *regs)
1056 */ 1055 */
1057 locked = xchg(&kexec_lock, 1); 1056 locked = xchg(&kexec_lock, 1);
1058 if (!locked) { 1057 if (!locked) {
1059 image = xchg(&kexec_crash_image, NULL); 1058 if (kexec_crash_image) {
1060 if (image) {
1061 struct pt_regs fixed_regs; 1059 struct pt_regs fixed_regs;
1062 crash_setup_regs(&fixed_regs, regs); 1060 crash_setup_regs(&fixed_regs, regs);
1063 machine_crash_shutdown(&fixed_regs); 1061 machine_crash_shutdown(&fixed_regs);
1064 machine_kexec(image); 1062 machine_kexec(kexec_crash_image);
1065 } 1063 }
1066 xchg(&kexec_lock, 0); 1064 xchg(&kexec_lock, 0);
1067 } 1065 }
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 20a997c73c3d..1b7157af051c 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -20,7 +20,6 @@
20*/ 20*/
21#define __KERNEL_SYSCALLS__ 21#define __KERNEL_SYSCALLS__
22 22
23#include <linux/config.h>
24#include <linux/module.h> 23#include <linux/module.h>
25#include <linux/sched.h> 24#include <linux/sched.h>
26#include <linux/syscalls.h> 25#include <linux/syscalls.h>
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 9e28478a17a5..e0ffe4ab0917 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -8,7 +8,6 @@
8 * 8 *
9 */ 9 */
10 10
11#include <linux/config.h>
12#include <linux/kobject.h> 11#include <linux/kobject.h>
13#include <linux/string.h> 12#include <linux/string.h>
14#include <linux/sysfs.h> 13#include <linux/sysfs.h>
diff --git a/kernel/module.c b/kernel/module.c
index 99c022ac3d21..281172f01e9a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -16,7 +16,6 @@
16 along with this program; if not, write to the Free Software 16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18*/ 18*/
19#include <linux/config.h>
20#include <linux/module.h> 19#include <linux/module.h>
21#include <linux/moduleloader.h> 20#include <linux/moduleloader.h>
22#include <linux/init.h> 21#include <linux/init.h>
diff --git a/kernel/panic.c b/kernel/panic.c
index cc2a4c9c36ac..ab13f0f668b5 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -8,7 +8,6 @@
8 * This function is used through-out the kernel (including mm and fs) 8 * This function is used through-out the kernel (including mm and fs)
9 * to indicate a major problem. 9 * to indicate a major problem.
10 */ 10 */
11#include <linux/config.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/sched.h> 12#include <linux/sched.h>
14#include <linux/delay.h> 13#include <linux/delay.h>
diff --git a/kernel/params.c b/kernel/params.c
index af43ecdc8d9b..91aea7aa532e 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -15,7 +15,6 @@
15 along with this program; if not, write to the Free Software 15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17*/ 17*/
18#include <linux/config.h>
19#include <linux/moduleparam.h> 18#include <linux/moduleparam.h>
20#include <linux/kernel.h> 19#include <linux/kernel.h>
21#include <linux/string.h> 20#include <linux/string.h>
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 857b4fa09124..ae44a70aae8a 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -100,18 +100,6 @@ config PM_STD_PARTITION
100 suspended image to. It will simply pick the first available swap 100 suspended image to. It will simply pick the first available swap
101 device. 101 device.
102 102
103config SWSUSP_ENCRYPT
104 bool "Encrypt suspend image"
105 depends on SOFTWARE_SUSPEND && CRYPTO=y && (CRYPTO_AES=y || CRYPTO_AES_586=y || CRYPTO_AES_X86_64=y)
106 default ""
107 ---help---
108 To prevent data gathering from swap after resume you can encrypt
109 the suspend image with a temporary key that is deleted on
110 resume.
111
112 Note that the temporary key is stored unencrypted on disk while the
113 system is suspended.
114
115config SUSPEND_SMP 103config SUSPEND_SMP
116 bool 104 bool
117 depends on HOTPLUG_CPU && X86 && PM 105 depends on HOTPLUG_CPU && X86 && PM
diff --git a/kernel/printk.c b/kernel/printk.c
index 95b7fe17f124..39ae24d2a415 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -26,7 +26,6 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/moduleparam.h> 27#include <linux/moduleparam.h>
28#include <linux/interrupt.h> /* For in_interrupt() */ 28#include <linux/interrupt.h> /* For in_interrupt() */
29#include <linux/config.h>
30#include <linux/delay.h> 29#include <linux/delay.h>
31#include <linux/smp.h> 30#include <linux/smp.h>
32#include <linux/security.h> 31#include <linux/security.h>
diff --git a/kernel/profile.c b/kernel/profile.c
index 5a730fdb1a2c..d5bd75e7501c 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -13,7 +13,6 @@
13 * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004 13 * to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
14 */ 14 */
15 15
16#include <linux/config.h>
17#include <linux/module.h> 16#include <linux/module.h>
18#include <linux/profile.h> 17#include <linux/profile.h>
19#include <linux/bootmem.h> 18#include <linux/bootmem.h>
diff --git a/kernel/resource.c b/kernel/resource.c
index 2404f9b0bc47..129cf046e561 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -7,7 +7,6 @@
7 * Arbitrary resource management. 7 * Arbitrary resource management.
8 */ 8 */
9 9
10#include <linux/config.h>
11#include <linux/module.h> 10#include <linux/module.h>
12#include <linux/sched.h> 11#include <linux/sched.h>
13#include <linux/errno.h> 12#include <linux/errno.h>
@@ -23,20 +22,18 @@
23 22
24struct resource ioport_resource = { 23struct resource ioport_resource = {
25 .name = "PCI IO", 24 .name = "PCI IO",
26 .start = 0x0000, 25 .start = 0,
27 .end = IO_SPACE_LIMIT, 26 .end = IO_SPACE_LIMIT,
28 .flags = IORESOURCE_IO, 27 .flags = IORESOURCE_IO,
29}; 28};
30
31EXPORT_SYMBOL(ioport_resource); 29EXPORT_SYMBOL(ioport_resource);
32 30
33struct resource iomem_resource = { 31struct resource iomem_resource = {
34 .name = "PCI mem", 32 .name = "PCI mem",
35 .start = 0UL, 33 .start = 0,
36 .end = ~0UL, 34 .end = -1,
37 .flags = IORESOURCE_MEM, 35 .flags = IORESOURCE_MEM,
38}; 36};
39
40EXPORT_SYMBOL(iomem_resource); 37EXPORT_SYMBOL(iomem_resource);
41 38
42static DEFINE_RWLOCK(resource_lock); 39static DEFINE_RWLOCK(resource_lock);
@@ -83,10 +80,10 @@ static int r_show(struct seq_file *m, void *v)
83 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent) 80 for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
84 if (p->parent == root) 81 if (p->parent == root)
85 break; 82 break;
86 seq_printf(m, "%*s%0*lx-%0*lx : %s\n", 83 seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
87 depth * 2, "", 84 depth * 2, "",
88 width, r->start, 85 width, (unsigned long long) r->start,
89 width, r->end, 86 width, (unsigned long long) r->end,
90 r->name ? r->name : "<BAD>"); 87 r->name ? r->name : "<BAD>");
91 return 0; 88 return 0;
92} 89}
@@ -151,8 +148,8 @@ __initcall(ioresources_init);
151/* Return the conflict entry if you can't request it */ 148/* Return the conflict entry if you can't request it */
152static struct resource * __request_resource(struct resource *root, struct resource *new) 149static struct resource * __request_resource(struct resource *root, struct resource *new)
153{ 150{
154 unsigned long start = new->start; 151 resource_size_t start = new->start;
155 unsigned long end = new->end; 152 resource_size_t end = new->end;
156 struct resource *tmp, **p; 153 struct resource *tmp, **p;
157 154
158 if (end < start) 155 if (end < start)
@@ -274,11 +271,10 @@ int find_next_system_ram(struct resource *res)
274 * Find empty slot in the resource tree given range and alignment. 271 * Find empty slot in the resource tree given range and alignment.
275 */ 272 */
276static int find_resource(struct resource *root, struct resource *new, 273static int find_resource(struct resource *root, struct resource *new,
277 unsigned long size, 274 resource_size_t size, resource_size_t min,
278 unsigned long min, unsigned long max, 275 resource_size_t max, resource_size_t align,
279 unsigned long align,
280 void (*alignf)(void *, struct resource *, 276 void (*alignf)(void *, struct resource *,
281 unsigned long, unsigned long), 277 resource_size_t, resource_size_t),
282 void *alignf_data) 278 void *alignf_data)
283{ 279{
284 struct resource *this = root->child; 280 struct resource *this = root->child;
@@ -320,11 +316,10 @@ static int find_resource(struct resource *root, struct resource *new,
320 * Allocate empty slot in the resource tree given range and alignment. 316 * Allocate empty slot in the resource tree given range and alignment.
321 */ 317 */
322int allocate_resource(struct resource *root, struct resource *new, 318int allocate_resource(struct resource *root, struct resource *new,
323 unsigned long size, 319 resource_size_t size, resource_size_t min,
324 unsigned long min, unsigned long max, 320 resource_size_t max, resource_size_t align,
325 unsigned long align,
326 void (*alignf)(void *, struct resource *, 321 void (*alignf)(void *, struct resource *,
327 unsigned long, unsigned long), 322 resource_size_t, resource_size_t),
328 void *alignf_data) 323 void *alignf_data)
329{ 324{
330 int err; 325 int err;
@@ -416,10 +411,10 @@ EXPORT_SYMBOL(insert_resource);
416 * arguments. Returns -EBUSY if it can't fit. Existing children of 411 * arguments. Returns -EBUSY if it can't fit. Existing children of
417 * the resource are assumed to be immutable. 412 * the resource are assumed to be immutable.
418 */ 413 */
419int adjust_resource(struct resource *res, unsigned long start, unsigned long size) 414int adjust_resource(struct resource *res, resource_size_t start, resource_size_t size)
420{ 415{
421 struct resource *tmp, *parent = res->parent; 416 struct resource *tmp, *parent = res->parent;
422 unsigned long end = start + size - 1; 417 resource_size_t end = start + size - 1;
423 int result = -EBUSY; 418 int result = -EBUSY;
424 419
425 write_lock(&resource_lock); 420 write_lock(&resource_lock);
@@ -466,7 +461,9 @@ EXPORT_SYMBOL(adjust_resource);
466 * 461 *
467 * Release-region releases a matching busy region. 462 * Release-region releases a matching busy region.
468 */ 463 */
469struct resource * __request_region(struct resource *parent, unsigned long start, unsigned long n, const char *name) 464struct resource * __request_region(struct resource *parent,
465 resource_size_t start, resource_size_t n,
466 const char *name)
470{ 467{
471 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 468 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
472 469
@@ -502,7 +499,8 @@ struct resource * __request_region(struct resource *parent, unsigned long start,
502 499
503EXPORT_SYMBOL(__request_region); 500EXPORT_SYMBOL(__request_region);
504 501
505int __check_region(struct resource *parent, unsigned long start, unsigned long n) 502int __check_region(struct resource *parent, resource_size_t start,
503 resource_size_t n)
506{ 504{
507 struct resource * res; 505 struct resource * res;
508 506
@@ -517,10 +515,11 @@ int __check_region(struct resource *parent, unsigned long start, unsigned long n
517 515
518EXPORT_SYMBOL(__check_region); 516EXPORT_SYMBOL(__check_region);
519 517
520void __release_region(struct resource *parent, unsigned long start, unsigned long n) 518void __release_region(struct resource *parent, resource_size_t start,
519 resource_size_t n)
521{ 520{
522 struct resource **p; 521 struct resource **p;
523 unsigned long end; 522 resource_size_t end;
524 523
525 p = &parent->child; 524 p = &parent->child;
526 end = start + n - 1; 525 end = start + n - 1;
@@ -549,7 +548,9 @@ void __release_region(struct resource *parent, unsigned long start, unsigned lon
549 548
550 write_unlock(&resource_lock); 549 write_unlock(&resource_lock);
551 550
552 printk(KERN_WARNING "Trying to free nonexistent resource <%08lx-%08lx>\n", start, end); 551 printk(KERN_WARNING "Trying to free nonexistent resource "
552 "<%016llx-%016llx>\n", (unsigned long long)start,
553 (unsigned long long)end);
553} 554}
554 555
555EXPORT_SYMBOL(__release_region); 556EXPORT_SYMBOL(__release_region);
diff --git a/kernel/sched.c b/kernel/sched.c
index 2629c1711fd6..d5e37072ea54 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4386,7 +4386,16 @@ asmlinkage long sys_sched_yield(void)
4386 return 0; 4386 return 0;
4387} 4387}
4388 4388
4389static inline void __cond_resched(void) 4389static inline int __resched_legal(void)
4390{
4391 if (unlikely(preempt_count()))
4392 return 0;
4393 if (unlikely(system_state != SYSTEM_RUNNING))
4394 return 0;
4395 return 1;
4396}
4397
4398static void __cond_resched(void)
4390{ 4399{
4391#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP 4400#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
4392 __might_sleep(__FILE__, __LINE__); 4401 __might_sleep(__FILE__, __LINE__);
@@ -4396,10 +4405,6 @@ static inline void __cond_resched(void)
4396 * PREEMPT_ACTIVE, which could trigger a second 4405 * PREEMPT_ACTIVE, which could trigger a second
4397 * cond_resched() call. 4406 * cond_resched() call.
4398 */ 4407 */
4399 if (unlikely(preempt_count()))
4400 return;
4401 if (unlikely(system_state != SYSTEM_RUNNING))
4402 return;
4403 do { 4408 do {
4404 add_preempt_count(PREEMPT_ACTIVE); 4409 add_preempt_count(PREEMPT_ACTIVE);
4405 schedule(); 4410 schedule();
@@ -4409,13 +4414,12 @@ static inline void __cond_resched(void)
4409 4414
4410int __sched cond_resched(void) 4415int __sched cond_resched(void)
4411{ 4416{
4412 if (need_resched()) { 4417 if (need_resched() && __resched_legal()) {
4413 __cond_resched(); 4418 __cond_resched();
4414 return 1; 4419 return 1;
4415 } 4420 }
4416 return 0; 4421 return 0;
4417} 4422}
4418
4419EXPORT_SYMBOL(cond_resched); 4423EXPORT_SYMBOL(cond_resched);
4420 4424
4421/* 4425/*
@@ -4436,7 +4440,7 @@ int cond_resched_lock(spinlock_t *lock)
4436 ret = 1; 4440 ret = 1;
4437 spin_lock(lock); 4441 spin_lock(lock);
4438 } 4442 }
4439 if (need_resched()) { 4443 if (need_resched() && __resched_legal()) {
4440 _raw_spin_unlock(lock); 4444 _raw_spin_unlock(lock);
4441 preempt_enable_no_resched(); 4445 preempt_enable_no_resched();
4442 __cond_resched(); 4446 __cond_resched();
@@ -4445,14 +4449,13 @@ int cond_resched_lock(spinlock_t *lock)
4445 } 4449 }
4446 return ret; 4450 return ret;
4447} 4451}
4448
4449EXPORT_SYMBOL(cond_resched_lock); 4452EXPORT_SYMBOL(cond_resched_lock);
4450 4453
4451int __sched cond_resched_softirq(void) 4454int __sched cond_resched_softirq(void)
4452{ 4455{
4453 BUG_ON(!in_softirq()); 4456 BUG_ON(!in_softirq());
4454 4457
4455 if (need_resched()) { 4458 if (need_resched() && __resched_legal()) {
4456 __local_bh_enable(); 4459 __local_bh_enable();
4457 __cond_resched(); 4460 __cond_resched();
4458 local_bh_disable(); 4461 local_bh_disable();
@@ -4460,10 +4463,8 @@ int __sched cond_resched_softirq(void)
4460 } 4463 }
4461 return 0; 4464 return 0;
4462} 4465}
4463
4464EXPORT_SYMBOL(cond_resched_softirq); 4466EXPORT_SYMBOL(cond_resched_softirq);
4465 4467
4466
4467/** 4468/**
4468 * yield - yield the current processor to other threads. 4469 * yield - yield the current processor to other threads.
4469 * 4470 *
diff --git a/kernel/signal.c b/kernel/signal.c
index 52adf53929f6..7fe874d12fae 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -10,7 +10,6 @@
10 * to allow signals to be sent reliably. 10 * to allow signals to be sent reliably.
11 */ 11 */
12 12
13#include <linux/config.h>
14#include <linux/slab.h> 13#include <linux/slab.h>
15#include <linux/module.h> 14#include <linux/module.h>
16#include <linux/smp_lock.h> 15#include <linux/smp_lock.h>
@@ -584,7 +583,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
584 && !capable(CAP_KILL)) 583 && !capable(CAP_KILL))
585 return error; 584 return error;
586 585
587 error = security_task_kill(t, info, sig); 586 error = security_task_kill(t, info, sig, 0);
588 if (!error) 587 if (!error)
589 audit_signal_info(sig, t); /* Let audit system see the signal */ 588 audit_signal_info(sig, t); /* Let audit system see the signal */
590 return error; 589 return error;
@@ -1107,7 +1106,7 @@ kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1107 1106
1108/* like kill_proc_info(), but doesn't use uid/euid of "current" */ 1107/* like kill_proc_info(), but doesn't use uid/euid of "current" */
1109int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid, 1108int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1110 uid_t uid, uid_t euid) 1109 uid_t uid, uid_t euid, u32 secid)
1111{ 1110{
1112 int ret = -EINVAL; 1111 int ret = -EINVAL;
1113 struct task_struct *p; 1112 struct task_struct *p;
@@ -1127,6 +1126,9 @@ int kill_proc_info_as_uid(int sig, struct siginfo *info, pid_t pid,
1127 ret = -EPERM; 1126 ret = -EPERM;
1128 goto out_unlock; 1127 goto out_unlock;
1129 } 1128 }
1129 ret = security_task_kill(p, info, sig, secid);
1130 if (ret)
1131 goto out_unlock;
1130 if (sig && p->sighand) { 1132 if (sig && p->sighand) {
1131 unsigned long flags; 1133 unsigned long flags;
1132 spin_lock_irqsave(&p->sighand->siglock, flags); 1134 spin_lock_irqsave(&p->sighand->siglock, flags);
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index d1b810782bc4..b31e54eadf56 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -9,7 +9,6 @@
9 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them) 9 * SMP and the DEBUG_SPINLOCK cases. (UP-nondebug inlines them)
10 */ 10 */
11 11
12#include <linux/config.h>
13#include <linux/linkage.h> 12#include <linux/linkage.h>
14#include <linux/preempt.h> 13#include <linux/preempt.h>
15#include <linux/spinlock.h> 14#include <linux/spinlock.h>
diff --git a/kernel/sys.c b/kernel/sys.c
index 2d5179c67cec..dbb3b9c7ea64 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -4,7 +4,6 @@
4 * Copyright (C) 1991, 1992 Linus Torvalds 4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */ 5 */
6 6
7#include <linux/config.h>
8#include <linux/module.h> 7#include <linux/module.h>
9#include <linux/mm.h> 8#include <linux/mm.h>
10#include <linux/utsname.h> 9#include <linux/utsname.h>
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 93a2c5398648..99a58f279077 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -18,7 +18,6 @@
18 * Removed it and replaced it with older style, 03/23/00, Bill Wendling 18 * Removed it and replaced it with older style, 03/23/00, Bill Wendling
19 */ 19 */
20 20
21#include <linux/config.h>
22#include <linux/module.h> 21#include <linux/module.h>
23#include <linux/mm.h> 22#include <linux/mm.h>
24#include <linux/swap.h> 23#include <linux/swap.h>
@@ -933,15 +932,6 @@ static ctl_table vm_table[] = {
933 .strategy = &sysctl_intvec, 932 .strategy = &sysctl_intvec,
934 .extra1 = &zero, 933 .extra1 = &zero,
935 }, 934 },
936 {
937 .ctl_name = VM_ZONE_RECLAIM_INTERVAL,
938 .procname = "zone_reclaim_interval",
939 .data = &zone_reclaim_interval,
940 .maxlen = sizeof(zone_reclaim_interval),
941 .mode = 0644,
942 .proc_handler = &proc_dointvec_jiffies,
943 .strategy = &sysctl_jiffies,
944 },
945#endif 935#endif
946#ifdef CONFIG_X86_32 936#ifdef CONFIG_X86_32
947 { 937 {
diff --git a/kernel/wait.c b/kernel/wait.c
index 791681cfea98..5985d866531f 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -3,7 +3,6 @@
3 * 3 *
4 * (C) 2004 William Irwin, Oracle 4 * (C) 2004 William Irwin, Oracle
5 */ 5 */
6#include <linux/config.h>
7#include <linux/init.h> 6#include <linux/init.h>
8#include <linux/module.h> 7#include <linux/module.h>
9#include <linux/sched.h> 8#include <linux/sched.h>