 arch/ia64/ia32/sys_ia32.c        |   2
 arch/ia64/kernel/mca.c           |   4
 arch/ppc64/kernel/prom_init.c    | 102
 arch/sparc64/kernel/pci_iommu.c  |  88
 arch/sparc64/kernel/pci_psycho.c |   2
 arch/sparc64/kernel/pci_sabre.c  |   2
 arch/sparc64/kernel/pci_schizo.c |   2
 arch/sparc64/kernel/sbus.c       |  20
 include/asm-sparc64/iommu.h      |   2
 include/asm-sparc64/pbm.h        |   8
 net/ipv4/esp4.c                  |   2
 net/ipv4/netfilter/ip_queue.c    |  10
 net/ipv4/udp.c                   |  12
 net/sched/sch_dsmark.c           |  16
 14 files changed, 184 insertions(+), 88 deletions(-)
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 247a21c64aea..c1e20d65dd6c 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -2427,7 +2427,7 @@ sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
 {
 	struct epoll_event *events64 = NULL;
 	mm_segment_t old_fs = get_fs();
-	int error, numevents, size;
+	int numevents, size;
 	int evt_idx;
 	int do_free_pages = 0;
 
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 2c75741dcc66..736e328b5e61 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1103,8 +1103,6 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
 	return IRQ_HANDLED;
 }
 
-#endif /* CONFIG_ACPI */
-
 /*
  * ia64_mca_cpe_poll
  *
@@ -1122,6 +1120,8 @@ ia64_mca_cpe_poll (unsigned long dummy)
 	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
+#endif /* CONFIG_ACPI */
+
 /*
  * C portion of the OS INIT handler
  *
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index 3de950de3671..1ac531ba7056 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -211,13 +211,23 @@ struct {
  */
 #define ADDR(x) (u32) ((unsigned long)(x) - offset)
 
+/*
+ * Error results ... some OF calls will return "-1" on error, some
+ * will return 0, some will return either. To simplify, here are
+ * macros to use with any ihandle or phandle return value to check if
+ * it is valid
+ */
+
+#define PROM_ERROR (-1u)
+#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
+#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
+
+
 /* This is the one and *ONLY* place where we actually call open
  * firmware from, since we need to make sure we're running in 32b
  * mode when we do. We switch back to 64b mode upon return.
  */
 
-#define PROM_ERROR (-1)
-
 static int __init call_prom(const char *service, int nargs, int nret, ...)
 {
 	int i;
@@ -587,14 +597,13 @@ static void __init prom_send_capabilities(void)
 {
 	unsigned long offset = reloc_offset();
 	ihandle elfloader;
-	int ret;
 
 	elfloader = call_prom("open", 1, 1, ADDR("/packages/elf-loader"));
 	if (elfloader == 0) {
 		prom_printf("couldn't open /packages/elf-loader\n");
 		return;
 	}
-	ret = call_prom("call-method", 3, 1, ADDR("process-elf-header"),
+	call_prom("call-method", 3, 1, ADDR("process-elf-header"),
 			elfloader, ADDR(&fake_elf));
 	call_prom("close", 1, 0, elfloader);
 }
@@ -646,7 +655,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 	     base = _ALIGN_UP(base + 0x100000, align)) {
 		prom_debug(" trying: 0x%x\n\r", base);
 		addr = (unsigned long)prom_claim(base, size, 0);
-		if ((int)addr != PROM_ERROR)
+		if (addr != PROM_ERROR)
 			break;
 		addr = 0;
 		if (align == 0)
@@ -708,7 +717,7 @@ static unsigned long __init alloc_down(unsigned long size, unsigned long align,
 	for(; base > RELOC(alloc_bottom); base = _ALIGN_DOWN(base - 0x100000, align)) {
 		prom_debug(" trying: 0x%x\n\r", base);
 		addr = (unsigned long)prom_claim(base, size, 0);
-		if ((int)addr != PROM_ERROR)
+		if (addr != PROM_ERROR)
 			break;
 		addr = 0;
 	}
@@ -902,18 +911,19 @@ static void __init prom_instantiate_rtas(void)
 {
 	unsigned long offset = reloc_offset();
 	struct prom_t *_prom = PTRRELOC(&prom);
-	phandle prom_rtas, rtas_node;
+	phandle rtas_node;
+	ihandle rtas_inst;
 	u32 base, entry = 0;
 	u32 size = 0;
 
 	prom_debug("prom_instantiate_rtas: start...\n");
 
-	prom_rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
-	prom_debug("prom_rtas: %x\n", prom_rtas);
-	if (prom_rtas == (phandle) -1)
+	rtas_node = call_prom("finddevice", 1, 1, ADDR("/rtas"));
+	prom_debug("rtas_node: %x\n", rtas_node);
+	if (!PHANDLE_VALID(rtas_node))
 		return;
 
-	prom_getprop(prom_rtas, "rtas-size", &size, sizeof(size));
+	prom_getprop(rtas_node, "rtas-size", &size, sizeof(size));
 	if (size == 0)
 		return;
 
@@ -922,14 +932,18 @@ static void __init prom_instantiate_rtas(void)
 		prom_printf("RTAS allocation failed !\n");
 		return;
 	}
-	prom_printf("instantiating rtas at 0x%x", base);
 
-	rtas_node = call_prom("open", 1, 1, ADDR("/rtas"));
-	prom_printf("...");
+	rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
+	if (!IHANDLE_VALID(rtas_inst)) {
+		prom_printf("opening rtas package failed");
+		return;
+	}
+
+	prom_printf("instantiating rtas at 0x%x ...", base);
 
 	if (call_prom("call-method", 3, 2,
 		      ADDR("instantiate-rtas"),
-		      rtas_node, base) != PROM_ERROR) {
+		      rtas_inst, base) != PROM_ERROR) {
 		entry = (long)_prom->args.rets[1];
 	}
 	if (entry == 0) {
@@ -940,8 +954,8 @@ static void __init prom_instantiate_rtas(void)
 
 	reserve_mem(base, size);
 
-	prom_setprop(prom_rtas, "linux,rtas-base", &base, sizeof(base));
-	prom_setprop(prom_rtas, "linux,rtas-entry", &entry, sizeof(entry));
+	prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
+	prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
 
 	prom_debug("rtas base = 0x%x\n", base);
 	prom_debug("rtas entry = 0x%x\n", entry);
@@ -1062,7 +1076,7 @@ static void __init prom_initialize_tce_table(void)
 
 		prom_printf("opening PHB %s", path);
 		phb_node = call_prom("open", 1, 1, path);
-		if ( (long)phb_node <= 0)
+		if (phb_node == 0)
 			prom_printf("... failed\n");
 		else
 			prom_printf("... done\n");
@@ -1279,12 +1293,12 @@ static void __init prom_init_client_services(unsigned long pp)
 
 	/* get a handle for the stdout device */
 	_prom->chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
-	if ((long)_prom->chosen <= 0)
+	if (!PHANDLE_VALID(_prom->chosen))
 		prom_panic("cannot find chosen"); /* msg won't be printed :( */
 
 	/* get device tree root */
 	_prom->root = call_prom("finddevice", 1, 1, ADDR("/"));
-	if ((long)_prom->root <= 0)
+	if (!PHANDLE_VALID(_prom->root))
 		prom_panic("cannot find device tree root"); /* msg won't be printed :( */
 }
 
@@ -1356,9 +1370,8 @@ static int __init prom_find_machine_type(void)
 	}
 	/* Default to pSeries. We need to know if we are running LPAR */
 	rtas = call_prom("finddevice", 1, 1, ADDR("/rtas"));
-	if (rtas != (phandle) -1) {
-		unsigned long x;
-		x = prom_getproplen(rtas, "ibm,hypertas-functions");
+	if (!PHANDLE_VALID(rtas)) {
+		int x = prom_getproplen(rtas, "ibm,hypertas-functions");
 		if (x != PROM_ERROR) {
 			prom_printf("Hypertas detected, assuming LPAR !\n");
 			return PLATFORM_PSERIES_LPAR;
@@ -1426,12 +1439,13 @@ static void __init prom_check_displays(void)
 		 * leave some room at the end of the path for appending extra
 		 * arguments
 		 */
-		if (call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE-10) < 0)
+		if (call_prom("package-to-path", 3, 1, node, path,
+			      PROM_SCRATCH_SIZE-10) == PROM_ERROR)
 			continue;
 		prom_printf("found display : %s, opening ... ", path);
 
 		ih = call_prom("open", 1, 1, path);
-		if (ih == (ihandle)0 || ih == (ihandle)-1) {
+		if (ih == 0) {
 			prom_printf("failed\n");
 			continue;
 		}
@@ -1514,6 +1528,12 @@ static unsigned long __init dt_find_string(char *str)
 	return 0;
 }
 
+/*
+ * The Open Firmware 1275 specification states properties must be 31 bytes or
+ * less, however not all firmwares obey this. Make it 64 bytes to be safe.
+ */
+#define MAX_PROPERTY_NAME 64
+
 static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
 					 unsigned long *mem_end)
 {
@@ -1527,10 +1547,12 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
 	/* get and store all property names */
 	prev_name = RELOC("");
 	for (;;) {
-
-		/* 32 is max len of name including nul. */
-		namep = make_room(mem_start, mem_end, 32, 1);
-		if (call_prom("nextprop", 3, 1, node, prev_name, namep) <= 0) {
+		int rc;
+
+		/* 64 is max len of name including nul. */
+		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
+		rc = call_prom("nextprop", 3, 1, node, prev_name, namep);
+		if (rc != 1) {
 			/* No more nodes: unwind alloc */
 			*mem_start = (unsigned long)namep;
 			break;
@@ -1555,12 +1577,6 @@ static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
 	}
 }
 
-/*
- * The Open Firmware 1275 specification states properties must be 31 bytes or
- * less, however not all firmwares obey this. Make it 64 bytes to be safe.
- */
-#define MAX_PROPERTY_NAME 64
-
 static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
 					unsigned long *mem_end)
 {
@@ -1607,7 +1623,10 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
 	prev_name = RELOC("");
 	sstart = (char *)RELOC(dt_string_start);
 	for (;;) {
-		if (call_prom("nextprop", 3, 1, node, prev_name, pname) <= 0)
+		int rc;
+
+		rc = call_prom("nextprop", 3, 1, node, prev_name, pname);
+		if (rc != 1)
 			break;
 
 		/* find string offset */
@@ -1623,7 +1642,7 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
 		l = call_prom("getproplen", 2, 1, node, pname);
 
 		/* sanity checks */
-		if (l < 0)
+		if (l == PROM_ERROR)
 			continue;
 		if (l > MAX_PROPERTY_LENGTH) {
 			prom_printf("WARNING: ignoring large property ");
@@ -1771,17 +1790,18 @@ static void __init fixup_device_tree(void)
 
 	/* Some G5s have a missing interrupt definition, fix it up here */
 	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
-	if ((long)u3 <= 0)
+	if (!PHANDLE_VALID(u3))
 		return;
 	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
-	if ((long)i2c <= 0)
+	if (!PHANDLE_VALID(i2c))
 		return;
 	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
-	if ((long)mpic <= 0)
+	if (!PHANDLE_VALID(mpic))
 		return;
 
 	/* check if proper rev of u3 */
-	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) <= 0)
+	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev))
+	    == PROM_ERROR)
 		return;
 	if (u3_rev != 0x35)
 		return;
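Not part of the patch: a small stand-alone C sketch of the validation idiom that PROM_ERROR, PHANDLE_VALID and IHANDLE_VALID encode. The handle values below are made up; the only assumption, taken from the comment added in the first hunk, is that Open Firmware signals failure as either 0 or -1 depending on the call.

#include <stdio.h>

typedef unsigned int phandle;	/* stand-in for the prom_init.c handle typedef */

#define PROM_ERROR		(-1u)
#define PHANDLE_VALID(p)	((p) != 0 && (p) != PROM_ERROR)

int main(void)
{
	/* Hypothetical return values: failure as 0, failure as -1, success. */
	phandle samples[] = { 0, PROM_ERROR, 0x00c14e28 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("handle 0x%08x -> %s\n", samples[i],
		       PHANDLE_VALID(samples[i]) ? "valid" : "invalid");
	return 0;
}

All of the finddevice/open checks rewritten in the hunks above reduce to this one macro test.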
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 33ca56c90da2..2803bc7c2c79 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -196,6 +196,34 @@ static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long
 	return NULL;
 }
 
+static int iommu_alloc_ctx(struct pci_iommu *iommu)
+{
+	int lowest = iommu->ctx_lowest_free;
+	int sz = IOMMU_NUM_CTXS - lowest;
+	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
+
+	if (unlikely(n == sz)) {
+		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
+		if (unlikely(n == lowest)) {
+			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
+			n = 0;
+		}
+	}
+	if (n)
+		__set_bit(n, iommu->ctx_bitmap);
+
+	return n;
+}
+
+static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
+{
+	if (likely(ctx)) {
+		__clear_bit(ctx, iommu->ctx_bitmap);
+		if (ctx < iommu->ctx_lowest_free)
+			iommu->ctx_lowest_free = ctx;
+	}
+}
+
 /* Allocate and map kernel buffer of size SIZE using consistent mode
  * DMA for PCI device PDEV. Return non-NULL cpu-side address if
  * successful and set *DMA_ADDRP to the PCI side dma address.
@@ -236,7 +264,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
 	npages = size >> IO_PAGE_SHIFT;
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	first_page = __pa(first_page);
 	while (npages--) {
 		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
@@ -317,6 +345,8 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
 		}
 	}
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	order = get_order(size);
@@ -360,7 +390,7 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
 	base_paddr = __pa(oaddr & IO_PAGE_MASK);
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 	if (strbuf->strbuf_enabled)
 		iopte_protection = IOPTE_STREAMING(ctx);
 	else
@@ -380,39 +410,53 @@ bad:
 	return PCI_DMA_ERROR_CODE;
 }
 
-static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages)
+static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
 {
 	int limit;
 
-	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	if (strbuf->strbuf_ctxflush &&
 	    iommu->iommu_ctxflush) {
 		unsigned long matchreg, flushreg;
+		u64 val;
 
 		flushreg = strbuf->strbuf_ctxflush;
 		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);
 
-		limit = 100000;
 		pci_iommu_write(flushreg, ctx);
-		for(;;) {
-			if (((long)pci_iommu_read(matchreg)) >= 0L)
-				break;
-			limit--;
-			if (!limit)
-				break;
-			udelay(1);
+		val = pci_iommu_read(matchreg);
+		val &= 0xffff;
+		if (!val)
+			goto do_flush_sync;
+
+		while (val) {
+			if (val & 0x1)
+				pci_iommu_write(flushreg, ctx);
+			val >>= 1;
 		}
-		if (!limit)
+		val = pci_iommu_read(matchreg);
+		if (unlikely(val)) {
 			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
-			       "timeout vaddr[%08x] ctx[%lx]\n",
-			       vaddr, ctx);
+			       "timeout matchreg[%lx] ctx[%lx]\n",
+			       val, ctx);
+			goto do_page_flush;
+		}
 	} else {
 		unsigned long i;
 
+	do_page_flush:
 		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
 			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
 	}
 
+do_flush_sync:
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == PCI_DMA_TODEVICE)
+		return;
+
+	PCI_STC_FLUSHFLAG_INIT(strbuf);
 	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
 	(void) pci_iommu_read(iommu->write_complete_reg);
 
@@ -466,7 +510,7 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -474,6 +518,8 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -613,7 +659,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
 	/* Step 4: Choose a context if necessary. */
 	ctx = 0;
 	if (iommu->iommu_ctxflush)
-		ctx = iommu->iommu_cur_ctx++;
+		ctx = iommu_alloc_ctx(iommu);
 
 	/* Step 5: Create the mappings. */
 	if (strbuf->strbuf_enabled)
@@ -678,7 +724,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 
 	/* Step 1: Kick data out of streaming buffers if necessary. */
 	if (strbuf->strbuf_enabled)
-		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	/* Step 2: Clear out first TSB entry. */
 	iopte_make_dummy(iommu, base);
@@ -686,6 +732,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
 	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
 			       npages, ctx);
 
+	iommu_free_ctx(iommu, ctx);
+
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -724,7 +772,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
 	}
 
 	/* Step 2: Kick data out of streaming buffers. */
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
@@ -768,7 +816,7 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
 	i--;
 	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
 		  - bus_addr) >> IO_PAGE_SHIFT;
-	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages);
+	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
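For context (not from the patch): the new iommu_alloc_ctx()/iommu_free_ctx() pair replaces the old monotonically increasing iommu_cur_ctx counter, which simply wrapped and could hand the same DMA context to two live mappings. Below is a rough user-space model of the same first-fit bitmap idea, with a plain loop standing in for the kernel's find_next_zero_bit() and context 0 reserved to mean "no context", as in the patch.

#include <stdio.h>

#define NUM_CTXS	4096			/* mirrors IOMMU_NUM_CTXS */
#define LONG_BITS	(sizeof(unsigned long) * 8)

static unsigned long ctx_bitmap[NUM_CTXS / LONG_BITS];
static int ctx_lowest_free = 1;			/* ctx 0 is never handed out */

static int test_bit_(int n)   { return (ctx_bitmap[n / LONG_BITS] >> (n % LONG_BITS)) & 1; }
static void set_bit_(int n)   { ctx_bitmap[n / LONG_BITS] |= 1UL << (n % LONG_BITS); }
static void clear_bit_(int n) { ctx_bitmap[n / LONG_BITS] &= ~(1UL << (n % LONG_BITS)); }

static int alloc_ctx(void)
{
	/* First-fit scan starting at the lowest context known to be free. */
	for (int n = ctx_lowest_free; n < NUM_CTXS; n++) {
		if (!test_bit_(n)) {
			set_bit_(n);
			return n;
		}
	}
	return 0;				/* out of contexts */
}

static void free_ctx(int ctx)
{
	if (ctx) {
		clear_bit_(ctx);
		if (ctx < ctx_lowest_free)
			ctx_lowest_free = ctx;	/* remember the new hole */
	}
}

int main(void)
{
	int a = alloc_ctx(), b = alloc_ctx();
	printf("a=%d b=%d\n", a, b);		/* 1 and 2 */
	free_ctx(a);
	printf("reused=%d\n", alloc_ctx());	/* 1 again: contexts are recycled */
	return 0;
}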
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 3567fa879e1f..534320ef0db2 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1212,7 +1212,7 @@ static void __init psycho_iommu_init(struct pci_controller_info *p)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 5525d1ec4af8..53d333b4a4e8 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1265,7 +1265,7 @@ static void __init sabre_iommu_init(struct pci_controller_info *p,
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses. */
 	iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index e93fcadc3722..5753175b94e6 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -1753,7 +1753,7 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
 
 	/* Setup initial software IOMMU state. */
 	spin_lock_init(&iommu->lock);
-	iommu->iommu_cur_ctx = 0;
+	iommu->ctx_lowest_free = 1;
 
 	/* Register addresses, SCHIZO has iommu ctx flushing. */
 	iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index 76ea6455433f..89f5e019f24c 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -117,17 +117,25 @@ static void iommu_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages
 
 #define STRBUF_TAG_VALID 0x02UL
 
-static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages)
+static void sbus_strbuf_flush(struct sbus_iommu *iommu, u32 base, unsigned long npages, int direction)
 {
 	unsigned long n;
 	int limit;
 
-	iommu->strbuf_flushflag = 0UL;
 	n = npages;
 	while (n--)
 		upa_writeq(base + (n << IO_PAGE_SHIFT),
 			   iommu->strbuf_regs + STRBUF_PFLUSH);
 
+	/* If the device could not have possibly put dirty data into
+	 * the streaming cache, no flush-flag synchronization needs
+	 * to be performed.
+	 */
+	if (direction == SBUS_DMA_TODEVICE)
+		return;
+
+	iommu->strbuf_flushflag = 0UL;
+
 	/* Whoopee cushion! */
 	upa_writeq(__pa(&iommu->strbuf_flushflag),
 		   iommu->strbuf_regs + STRBUF_FSYNC);
@@ -421,7 +429,7 @@ void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t dma_addr, size_t size,
 
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -584,7 +592,7 @@ void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int nents, int
 	iommu = sdev->bus->iommu;
 	spin_lock_irqsave(&iommu->lock, flags);
 	free_streaming_cluster(iommu, dvma_base, size >> IO_PAGE_SHIFT);
-	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, dvma_base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -596,7 +604,7 @@ void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t base, size_t
 	size = (IO_PAGE_ALIGN(base + size) - (base & IO_PAGE_MASK));
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base & IO_PAGE_MASK, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
@@ -620,7 +628,7 @@ void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int
 	size = IO_PAGE_ALIGN(sg[i].dma_address + sg[i].dma_length) - base;
 
 	spin_lock_irqsave(&iommu->lock, flags);
-	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT);
+	sbus_strbuf_flush(iommu, base, size >> IO_PAGE_SHIFT, direction);
 	spin_unlock_irqrestore(&iommu->lock, flags);
 }
 
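Illustration only, not kernel code: why the new direction argument pays off. For a TODEVICE mapping the device only ever read from memory, so the streaming cache cannot hold dirty data and the expensive flush-flag wait can be skipped. The sketch below models that decision with printf() in place of the register writes; the enum names and the page stride are invented for the example.

#include <stdio.h>

enum dma_direction { DMA_TODEVICE, DMA_FROMDEVICE, DMA_BIDIRECTIONAL };

/* Toy model of sbus_strbuf_flush()/pci_strbuf_flush() after the patch:
 * per-page flush requests are always issued, but the flush-flag
 * synchronization is only awaited when the device may have written data. */
static void strbuf_flush(unsigned int base, unsigned int npages,
			 enum dma_direction dir)
{
	for (unsigned int i = 0; i < npages; i++)
		printf("  flush page 0x%08x\n", base + i * 0x2000);

	if (dir == DMA_TODEVICE)
		return;		/* no dirty data possible: skip the sync */

	printf("  wait for flush-flag synchronization\n");
}

int main(void)
{
	printf("unmap of a TODEVICE buffer:\n");
	strbuf_flush(0x80000000u, 2, DMA_TODEVICE);
	printf("unmap of a FROMDEVICE buffer:\n");
	strbuf_flush(0x80000000u, 2, DMA_FROMDEVICE);
	return 0;
}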
diff --git a/include/asm-sparc64/iommu.h b/include/asm-sparc64/iommu.h
index 5fd16e42a045..0de7a3da79cd 100644
--- a/include/asm-sparc64/iommu.h
+++ b/include/asm-sparc64/iommu.h
@@ -16,4 +16,6 @@
 #define IOPTE_CACHE 0x0000000000000010UL /* Cached (in UPA E-cache) */
 #define IOPTE_WRITE 0x0000000000000002UL /* Writeable */
 
+#define IOMMU_NUM_CTXS 4096
+
 #endif /* !(_SPARC_IOMMU_H) */
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index 92999631c819..4c15610a2bac 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -15,6 +15,7 @@
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/oplib.h>
+#include <asm/iommu.h>
 
 /* The abstraction used here is that there are PCI controllers,
  * each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
@@ -40,9 +41,6 @@ struct pci_iommu {
 	 */
 	spinlock_t lock;
 
-	/* Context allocator. */
-	unsigned int iommu_cur_ctx;
-
 	/* IOMMU page table, a linear array of ioptes. */
 	iopte_t *page_table; /* The page table itself. */
 	int page_table_sz_bits; /* log2 of ow many pages does it map? */
@@ -87,6 +85,10 @@ struct pci_iommu {
 		u16 flush;
 	} alloc_info[PBM_NCLUSTERS];
 
+	/* CTX allocation. */
+	unsigned long ctx_lowest_free;
+	unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];
+
 	/* Here a PCI controller driver describes the areas of
 	 * PCI memory space where DMA to/from physical memory
 	 * are addressed. Drivers interrogate the PCI layer
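A quick sanity check (mine, not from the patch) on how the new ctx_bitmap field is sized: IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8) gives one bit per context.

#include <stdio.h>

#define IOMMU_NUM_CTXS 4096

int main(void)
{
	unsigned long ctx_bitmap[IOMMU_NUM_CTXS / (sizeof(unsigned long) * 8)];

	/* With 64-bit longs: 4096 contexts / 64 bits = 64 array elements,
	 * i.e. 512 bytes added to struct pci_iommu. */
	printf("elements: %zu, bytes: %zu, bits: %zu\n",
	       sizeof(ctx_bitmap) / sizeof(ctx_bitmap[0]),
	       sizeof(ctx_bitmap),
	       sizeof(ctx_bitmap) * 8);
	return 0;
}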
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 053a883247ba..eae84cc39d3f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -478,7 +478,7 @@ static int __init esp4_init(void)
 {
 	struct xfrm_decap_state decap;
 
-	if (sizeof(struct esp_decap_data) <
+	if (sizeof(struct esp_decap_data) >
 	    sizeof(decap.decap_data)) {
 		extern void decap_data_too_small(void);
 
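The one-character fix above is worth spelling out: the `extern void decap_data_too_small(void)` call is a link-time assertion. When the guarding condition is a compile-time constant that is false, the optimizer drops the call and the undefined symbol is never referenced; if it is true, the build fails. With `<` the check tested the wrong direction, so it could never catch esp_decap_data outgrowing decap_data. A stand-alone sketch of the trick, using made-up structure layouts rather than the real kernel ones (build with -O1 or higher so the dead branch is eliminated):

#include <stdio.h>

/* Hypothetical stand-ins, NOT the real kernel structures. */
struct esp_decap_data { unsigned int saddr; unsigned short sport; unsigned char proto; };
struct xfrm_decap_state { char decap_data[20]; unsigned short decap_type; };

/* Deliberately never defined: referencing it makes the link fail. */
extern void decap_data_too_small(void);

int main(void)
{
	struct xfrm_decap_state decap;

	/* Same pattern as esp4_init(): a constant-false condition lets the
	 * compiler discard the call; if esp_decap_data ever outgrows the
	 * decap_data buffer, the undefined symbol breaks the build. */
	if (sizeof(struct esp_decap_data) > sizeof(decap.decap_data))
		decap_data_too_small();

	printf("%zu bytes needed, %zu available\n",
	       sizeof(struct esp_decap_data), sizeof(decap.decap_data));
	return 0;
}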
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index e5746b674413..eda1fba431a4 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -3,6 +3,7 @@
  * communicating with userspace via netlink.
  *
  * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
+ * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -17,6 +18,7 @@
  * 2005-01-10: Added /proc counter for dropped packets; fixed so
  *             packets aren't delivered to user space if they're going
  *             to be dropped.
+ * 2005-05-26: local_bh_{disable,enable} around nf_reinject (Harald Welte)
  *
  */
 #include <linux/module.h>
@@ -71,7 +73,15 @@ static DECLARE_MUTEX(ipqnl_sem);
 static void
 ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
 {
+	/* TCP input path (and probably other bits) assume to be called
+	 * from softirq context, not from syscall, like ipq_issue_verdict is
+	 * called.  TCP input path deadlocks with locks taken from timer
+	 * softirq, e.g.  We therefore emulate this by local_bh_disable() */
+
+	local_bh_disable();
 	nf_reinject(entry->skb, entry->info, verdict);
+	local_bh_enable();
+
 	kfree(entry);
 }
 
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4a6952e3fee9..7c24e64b443f 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -738,7 +738,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 		unsigned long amount;
 
 		amount = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		skb = skb_peek(&sk->sk_receive_queue);
 		if (skb != NULL) {
 			/*
@@ -748,7 +748,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 			 */
 			amount = skb->len - sizeof(struct udphdr);
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		return put_user(amount, (int __user *)arg);
 	}
 
@@ -848,12 +848,12 @@ csum_copy_err:
 	/* Clear queue. */
 	if (flags&MSG_PEEK) {
 		int clear = 0;
-		spin_lock_irq(&sk->sk_receive_queue.lock);
+		spin_lock_bh(&sk->sk_receive_queue.lock);
 		if (skb == skb_peek(&sk->sk_receive_queue)) {
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			clear = 1;
 		}
-		spin_unlock_irq(&sk->sk_receive_queue.lock);
+		spin_unlock_bh(&sk->sk_receive_queue.lock);
 		if (clear)
 			kfree_skb(skb);
 	}
@@ -1334,7 +1334,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 	struct sk_buff_head *rcvq = &sk->sk_receive_queue;
 	struct sk_buff *skb;
 
-	spin_lock_irq(&rcvq->lock);
+	spin_lock_bh(&rcvq->lock);
 	while ((skb = skb_peek(rcvq)) != NULL) {
 		if (udp_checksum_complete(skb)) {
 			UDP_INC_STATS_BH(UDP_MIB_INERRORS);
@@ -1345,7 +1345,7 @@ unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait)
 			break;
 		}
 	}
-	spin_unlock_irq(&rcvq->lock);
+	spin_unlock_bh(&rcvq->lock);
 
 	/* nothing to see, move along */
 	if (skb == NULL)
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 8a3db9d95bab..d8bd2a569c7c 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -18,7 +18,7 @@
 #include <asm/byteorder.h>
 
 
-#if 1 /* control */
+#if 0 /* control */
 #define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
 #else
 #define DPRINTK(format,args...)
@@ -73,8 +73,13 @@ static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
 
 	DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
 	    old);
-	if (!new)
-		new = &noop_qdisc;
+
+	if (new == NULL) {
+		new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
+		if (new == NULL)
+			new = &noop_qdisc;
+	}
+
 	sch_tree_lock(sch);
 	*old = xchg(&p->q,new);
 	if (*old)
@@ -163,14 +168,15 @@ static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
 		return;
 	for (i = 0; i < p->indices; i++) {
 		if (p->mask[i] == 0xff && !p->value[i])
-			continue;
+			goto ignore;
 		if (walker->count >= walker->skip) {
 			if (walker->fn(sch, i+1, walker) < 0) {
 				walker->stop = 1;
 				break;
 			}
 		}
-		walker->count++;
+ignore:
+		walker->count++;
 	}
 }
 
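A final note on the `continue` -> `goto ignore` change in dsmark_walk(): walker->count has to advance even for table slots the dump ignores, otherwise a walk that resumes with a non-zero skip lands on the wrong entry. A small stand-alone model of that bookkeeping (the table values and the `== 0` "unconfigured" test are invented; the real code checks `p->mask[i] == 0xff && !p->value[i]`):

#include <stdio.h>

struct walker { int skip, count; };

/* Mirrors the fixed dsmark_walk() loop: ignored entries skip the callback
 * but still fall through to the count++ below the label. */
static void walk(const int *tbl, int n, struct walker *w, int (*fn)(int idx))
{
	for (int i = 0; i < n; i++) {
		if (tbl[i] == 0)			/* "unconfigured" slot */
			goto ignore;
		if (w->count >= w->skip) {
			if (fn(i + 1) < 0)
				break;
		}
ignore:
		w->count++;				/* counted even when ignored */
	}
}

static int print_idx(int idx)
{
	printf("dumping entry %d\n", idx);
	return 0;
}

int main(void)
{
	int tbl[] = { 0, 5, 0, 7, 9 };
	struct walker w = { .skip = 3, .count = 0 };

	/* skip=3 resumes at the fourth slot because the two ignored slots
	 * (and the one below the skip threshold) still advanced count. */
	walk(tbl, 5, &w, print_idx);
	return 0;
}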
