114 files changed, 1132 insertions, 1042 deletions
diff --git a/Documentation/scsi/ChangeLog.arcmsr b/Documentation/scsi/ChangeLog.arcmsr
index de2bcacfa870..038a3e6ecaa4 100644
--- a/Documentation/scsi/ChangeLog.arcmsr
+++ b/Documentation/scsi/ChangeLog.arcmsr
@@ -109,4 +109,10 @@ | |||
109 | ** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer() | 109 | ** 8.replace pci_alloc_consistent()/pci_free_consistent() with kmalloc()/kfree() in arcmsr_iop_message_xfer() |
110 | ** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool() | 110 | ** 9. fix the release of dma memory for type B in arcmsr_free_ccb_pool() |
111 | ** 10.fix the arcmsr_polling_hbb_ccbdone() | 111 | ** 10.fix the arcmsr_polling_hbb_ccbdone() |
112 | ** 1.20.00.15 02/27/2008 Erich Chen & Nick Cheng | ||
113 | ** 1.arcmsr_iop_message_xfer() is called from atomic context under the | ||
114 | ** queuecommand scsi_host_template handler. James Bottomley pointed out | ||
115 | ** that the current GFP_KERNEL|GFP_DMA flags are wrong: firstly we are in | ||
116 | ** atomic context, secondly this memory is not used for DMA. | ||
117 | ** Also removed some unneeded casts. Thanks to Daniel Drake <dsd@gentoo.org> | ||
112 | ************************************************************************** | 118 | ************************************************************************** |
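The changelog entry above carries the reasoning for the fix: arcmsr_iop_message_xfer() runs in atomic context under the queuecommand handler, so a sleeping GFP_KERNEL allocation is illegal there, and GFP_DMA buys nothing because the buffer never reaches the hardware. A minimal sketch of the corrected allocation pattern follows; it is illustrative only, and example_message_copy() is a placeholder name, not the driver's real function.

/*
 * Illustrative sketch, not the arcmsr driver code: allocate with
 * GFP_ATOMIC because the caller cannot sleep, and drop GFP_DMA
 * because the buffer is consumed by the CPU, not the adapter.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_message_copy(const void *src, size_t len)
{
	unsigned char *buf;

	buf = kmalloc(len, GFP_ATOMIC);		/* was GFP_KERNEL | GFP_DMA */
	if (!buf)
		return -ENOMEM;

	memcpy(buf, src, len);
	/* ... hand the copy to the message layer ... */
	kfree(buf);
	return 0;
}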
diff --git a/arch/blackfin/kernel/fixed_code.S b/arch/blackfin/kernel/fixed_code.S
index 90262691b11a..5ed47228a390 100644
--- a/arch/blackfin/kernel/fixed_code.S
+++ b/arch/blackfin/kernel/fixed_code.S
@@ -101,9 +101,9 @@ ENDPROC (_atomic_ior32) | |||
101 | 101 | ||
102 | .align 16 | 102 | .align 16 |
103 | /* | 103 | /* |
104 | * Atomic ior, 32 bit. | 104 | * Atomic and, 32 bit. |
105 | * Inputs: P0: memory address to use | 105 | * Inputs: P0: memory address to use |
106 | * R0: value to ior | 106 | * R0: value to and |
107 | * Outputs: R0: new contents of the memory address. | 107 | * Outputs: R0: new contents of the memory address. |
108 | * R1: previous contents of the memory address. | 108 | * R1: previous contents of the memory address. |
109 | */ | 109 | */ |
@@ -112,13 +112,13 @@ ENTRY(_atomic_and32) | |||
112 | R0 = R1 & R0; | 112 | R0 = R1 & R0; |
113 | [P0] = R0; | 113 | [P0] = R0; |
114 | rts; | 114 | rts; |
115 | ENDPROC (_atomic_ior32) | 115 | ENDPROC (_atomic_and32) |
116 | 116 | ||
117 | .align 16 | 117 | .align 16 |
118 | /* | 118 | /* |
119 | * Atomic ior, 32 bit. | 119 | * Atomic xor, 32 bit. |
120 | * Inputs: P0: memory address to use | 120 | * Inputs: P0: memory address to use |
121 | * R0: value to ior | 121 | * R0: value to xor |
122 | * Outputs: R0: new contents of the memory address. | 122 | * Outputs: R0: new contents of the memory address. |
123 | * R1: previous contents of the memory address. | 123 | * R1: previous contents of the memory address. |
124 | */ | 124 | */ |
@@ -127,7 +127,7 @@ ENTRY(_atomic_xor32) | |||
127 | R0 = R1 ^ R0; | 127 | R0 = R1 ^ R0; |
128 | [P0] = R0; | 128 | [P0] = R0; |
129 | rts; | 129 | rts; |
130 | ENDPROC (_atomic_ior32) | 130 | ENDPROC (_atomic_xor32) |
131 | 131 | ||
132 | .align 16 | 132 | .align 16 |
133 | /* | 133 | /* |
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c
index a0950c1fd800..40846aa034c4 100644
--- a/arch/blackfin/mach-bf548/boards/ezkit.c
+++ b/arch/blackfin/mach-bf548/boards/ezkit.c
@@ -323,7 +323,7 @@ static struct platform_device bf5xx_nand_device = { | |||
323 | }; | 323 | }; |
324 | #endif | 324 | #endif |
325 | 325 | ||
326 | #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN) | 326 | #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) |
327 | static struct platform_device bf54x_sdh_device = { | 327 | static struct platform_device bf54x_sdh_device = { |
328 | .name = "bfin-sdh", | 328 | .name = "bfin-sdh", |
329 | .id = 0, | 329 | .id = 0, |
@@ -636,7 +636,7 @@ static struct platform_device *ezkit_devices[] __initdata = { | |||
636 | &bf5xx_nand_device, | 636 | &bf5xx_nand_device, |
637 | #endif | 637 | #endif |
638 | 638 | ||
639 | #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN) | 639 | #if defined(CONFIG_SDH_BFIN) || defined(CONFIG_SDH_BFIN_MODULE) |
640 | &bf54x_sdh_device, | 640 | &bf54x_sdh_device, |
641 | #endif | 641 | #endif |
642 | 642 | ||
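The fix above relies on the Kconfig convention for tristate options: CONFIG_FOO is defined when the driver is built in and CONFIG_FOO_MODULE when it is built as a module, so a guard that repeats the built-in symbol silently drops the modular case. A generic sketch of the intended pattern (FOO is a placeholder, not a symbol from the tree):

#include <linux/platform_device.h>

/* Register the device whether the driver is built in or modular. */
#if defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE)
static struct platform_device foo_device = {
	.name = "foo",
	.id   = 0,
};
#endif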
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 2cbb7a0bc38e..cee54cebbc65 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -1369,7 +1369,7 @@ ENTRY(_sys_call_table) | |||
1369 | .long _sys_epoll_pwait | 1369 | .long _sys_epoll_pwait |
1370 | .long _sys_utimensat | 1370 | .long _sys_utimensat |
1371 | .long _sys_signalfd | 1371 | .long _sys_signalfd |
1372 | .long _sys_ni_syscall | 1372 | .long _sys_timerfd_create |
1373 | .long _sys_eventfd /* 350 */ | 1373 | .long _sys_eventfd /* 350 */ |
1374 | .long _sys_pread64 | 1374 | .long _sys_pread64 |
1375 | .long _sys_pwrite64 | 1375 | .long _sys_pwrite64 |
@@ -1378,6 +1378,9 @@ ENTRY(_sys_call_table) | |||
1378 | .long _sys_get_robust_list /* 355 */ | 1378 | .long _sys_get_robust_list /* 355 */ |
1379 | .long _sys_fallocate | 1379 | .long _sys_fallocate |
1380 | .long _sys_semtimedop | 1380 | .long _sys_semtimedop |
1381 | .long _sys_timerfd_settime | ||
1382 | .long _sys_timerfd_gettime | ||
1383 | |||
1381 | .rept NR_syscalls-(.-_sys_call_table)/4 | 1384 | .rept NR_syscalls-(.-_sys_call_table)/4 |
1382 | .long _sys_ni_syscall | 1385 | .long _sys_ni_syscall |
1383 | .endr | 1386 | .endr |
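The table slots wired up above correspond to the timerfd family of system calls (timerfd_create, timerfd_settime, timerfd_gettime). A small userspace sketch of how they are used once the syscalls are reachable; illustrative only, and it assumes a libc that provides the timerfd wrappers:

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct itimerspec its = { .it_value = { .tv_sec = 1 } };
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0)
		return 1;
	timerfd_settime(fd, 0, &its, NULL);		/* one-shot timer, 1 second */
	read(fd, &expirations, sizeof(expirations));	/* blocks until it fires */
	printf("timer expired %llu time(s)\n", (unsigned long long)expirations);
	close(fd);
	return 0;
}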
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 94e57109fad6..8f6bcfe1dada 100644
--- a/arch/ia64/hp/common/hwsw_iommu.c
+++ b/arch/ia64/hp/common/hwsw_iommu.c
@@ -71,7 +71,7 @@ hwsw_init (void) | |||
71 | #ifdef CONFIG_IA64_GENERIC | 71 | #ifdef CONFIG_IA64_GENERIC |
72 | /* Better to have normal DMA than panic */ | 72 | /* Better to have normal DMA than panic */ |
73 | printk(KERN_WARNING "%s: Failed to initialize software I/O TLB," | 73 | printk(KERN_WARNING "%s: Failed to initialize software I/O TLB," |
74 | " reverting to hpzx1 platform vector\n", __FUNCTION__); | 74 | " reverting to hpzx1 platform vector\n", __func__); |
75 | machvec_init("hpzx1"); | 75 | machvec_init("hpzx1"); |
76 | #else | 76 | #else |
77 | panic("Unable to initialize software I/O TLB services"); | 77 | panic("Unable to initialize software I/O TLB services"); |
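Most of the remaining ia64 hunks in this commit are the same mechanical substitution seen here: the GCC-specific __FUNCTION__ spelling is replaced by __func__, the predefined identifier standardized in C99, which expands to the same function-name string. A short sketch of the resulting pattern (example_probe() is a made-up name, not kernel code):

#include <linux/kernel.h>
#include <linux/errno.h>

static int example_probe(void)
{
	/* Prints "example_probe: probe failed"; __FUNCTION__ would expand identically. */
	printk(KERN_WARNING "%s: probe failed\n", __func__);
	return -ENODEV;
}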
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index a94445422cc6..523eae6d3e49 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -529,7 +529,7 @@ sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) | |||
529 | base_mask = RESMAP_MASK(bits_wanted); | 529 | base_mask = RESMAP_MASK(bits_wanted); |
530 | mask = base_mask << bitshiftcnt; | 530 | mask = base_mask << bitshiftcnt; |
531 | 531 | ||
532 | DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); | 532 | DBG_RES("%s() o %ld %p", __func__, o, res_ptr); |
533 | for(; res_ptr < res_end ; res_ptr++) | 533 | for(; res_ptr < res_end ; res_ptr++) |
534 | { | 534 | { |
535 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | 535 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); |
@@ -679,7 +679,7 @@ sba_alloc_range(struct ioc *ioc, size_t size) | |||
679 | #endif | 679 | #endif |
680 | 680 | ||
681 | DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", | 681 | DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", |
682 | __FUNCTION__, size, pages_needed, pide, | 682 | __func__, size, pages_needed, pide, |
683 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), | 683 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), |
684 | ioc->res_bitshift ); | 684 | ioc->res_bitshift ); |
685 | 685 | ||
@@ -722,8 +722,8 @@ sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) | |||
722 | m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); | 722 | m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); |
723 | bits_not_wanted = 0; | 723 | bits_not_wanted = 0; |
724 | 724 | ||
725 | DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size, | 725 | DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size, |
726 | bits_not_wanted, m, pide, res_ptr, *res_ptr); | 726 | bits_not_wanted, m, pide, res_ptr, *res_ptr); |
727 | 727 | ||
728 | ASSERT(m != 0); | 728 | ASSERT(m != 0); |
729 | ASSERT(bits_not_wanted); | 729 | ASSERT(bits_not_wanted); |
@@ -940,8 +940,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |||
940 | 940 | ||
941 | iovp = (dma_addr_t) pide << iovp_shift; | 941 | iovp = (dma_addr_t) pide << iovp_shift; |
942 | 942 | ||
943 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | 943 | DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset); |
944 | __FUNCTION__, addr, (long) iovp | offset); | ||
945 | 944 | ||
946 | pdir_start = &(ioc->pdir_base[pide]); | 945 | pdir_start = &(ioc->pdir_base[pide]); |
947 | 946 | ||
@@ -1029,8 +1028,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |||
1029 | #endif | 1028 | #endif |
1030 | offset = iova & ~iovp_mask; | 1029 | offset = iova & ~iovp_mask; |
1031 | 1030 | ||
1032 | DBG_RUN("%s() iovp 0x%lx/%x\n", | 1031 | DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size); |
1033 | __FUNCTION__, (long) iova, size); | ||
1034 | 1032 | ||
1035 | iova ^= offset; /* clear offset bits */ | 1033 | iova ^= offset; /* clear offset bits */ |
1036 | size += offset; | 1034 | size += offset; |
@@ -1404,7 +1402,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
1404 | struct scatterlist *sg; | 1402 | struct scatterlist *sg; |
1405 | #endif | 1403 | #endif |
1406 | 1404 | ||
1407 | DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); | 1405 | DBG_RUN_SG("%s() START %d entries\n", __func__, nents); |
1408 | ioc = GET_IOC(dev); | 1406 | ioc = GET_IOC(dev); |
1409 | ASSERT(ioc); | 1407 | ASSERT(ioc); |
1410 | 1408 | ||
@@ -1468,7 +1466,7 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
1468 | #endif | 1466 | #endif |
1469 | 1467 | ||
1470 | ASSERT(coalesced == filled); | 1468 | ASSERT(coalesced == filled); |
1471 | DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); | 1469 | DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled); |
1472 | 1470 | ||
1473 | return filled; | 1471 | return filled; |
1474 | } | 1472 | } |
@@ -1491,7 +1489,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in | |||
1491 | #endif | 1489 | #endif |
1492 | 1490 | ||
1493 | DBG_RUN_SG("%s() START %d entries, %p,%x\n", | 1491 | DBG_RUN_SG("%s() START %d entries, %p,%x\n", |
1494 | __FUNCTION__, nents, sba_sg_address(sglist), sglist->length); | 1492 | __func__, nents, sba_sg_address(sglist), sglist->length); |
1495 | 1493 | ||
1496 | #ifdef ASSERT_PDIR_SANITY | 1494 | #ifdef ASSERT_PDIR_SANITY |
1497 | ioc = GET_IOC(dev); | 1495 | ioc = GET_IOC(dev); |
@@ -1509,7 +1507,7 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in | |||
1509 | nents--; | 1507 | nents--; |
1510 | } | 1508 | } |
1511 | 1509 | ||
1512 | DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); | 1510 | DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents); |
1513 | 1511 | ||
1514 | #ifdef ASSERT_PDIR_SANITY | 1512 | #ifdef ASSERT_PDIR_SANITY |
1515 | spin_lock_irqsave(&ioc->res_lock, flags); | 1513 | spin_lock_irqsave(&ioc->res_lock, flags); |
@@ -1546,7 +1544,7 @@ ioc_iova_init(struct ioc *ioc) | |||
1546 | ioc->iov_size = ~ioc->imask + 1; | 1544 | ioc->iov_size = ~ioc->imask + 1; |
1547 | 1545 | ||
1548 | DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", | 1546 | DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", |
1549 | __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask, | 1547 | __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask, |
1550 | ioc->iov_size >> 20); | 1548 | ioc->iov_size >> 20); |
1551 | 1549 | ||
1552 | switch (iovp_size) { | 1550 | switch (iovp_size) { |
@@ -1569,7 +1567,7 @@ ioc_iova_init(struct ioc *ioc) | |||
1569 | 1567 | ||
1570 | memset(ioc->pdir_base, 0, ioc->pdir_size); | 1568 | memset(ioc->pdir_base, 0, ioc->pdir_size); |
1571 | 1569 | ||
1572 | DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__, | 1570 | DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__, |
1573 | iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); | 1571 | iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); |
1574 | 1572 | ||
1575 | ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); | 1573 | ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); |
@@ -1612,7 +1610,7 @@ ioc_iova_init(struct ioc *ioc) | |||
1612 | 1610 | ||
1613 | prefetch_spill_page = virt_to_phys(addr); | 1611 | prefetch_spill_page = virt_to_phys(addr); |
1614 | 1612 | ||
1615 | DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); | 1613 | DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page); |
1616 | } | 1614 | } |
1617 | /* | 1615 | /* |
1618 | ** Set all the PDIR entries valid w/ the spill page as the target | 1616 | ** Set all the PDIR entries valid w/ the spill page as the target |
@@ -1641,7 +1639,7 @@ ioc_resource_init(struct ioc *ioc) | |||
1641 | /* resource map size dictated by pdir_size */ | 1639 | /* resource map size dictated by pdir_size */ |
1642 | ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ | 1640 | ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ |
1643 | ioc->res_size >>= 3; /* convert bit count to byte count */ | 1641 | ioc->res_size >>= 3; /* convert bit count to byte count */ |
1644 | DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); | 1642 | DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size); |
1645 | 1643 | ||
1646 | ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, | 1644 | ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, |
1647 | get_order(ioc->res_size)); | 1645 | get_order(ioc->res_size)); |
@@ -1664,7 +1662,7 @@ ioc_resource_init(struct ioc *ioc) | |||
1664 | | prefetch_spill_page); | 1662 | | prefetch_spill_page); |
1665 | #endif | 1663 | #endif |
1666 | 1664 | ||
1667 | DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, | 1665 | DBG_INIT("%s() res_map %x %p\n", __func__, |
1668 | ioc->res_size, (void *) ioc->res_map); | 1666 | ioc->res_size, (void *) ioc->res_map); |
1669 | } | 1667 | } |
1670 | 1668 | ||
@@ -1767,7 +1765,7 @@ ioc_init(u64 hpa, void *handle) | |||
1767 | iovp_size = (1 << iovp_shift); | 1765 | iovp_size = (1 << iovp_shift); |
1768 | iovp_mask = ~(iovp_size - 1); | 1766 | iovp_mask = ~(iovp_size - 1); |
1769 | 1767 | ||
1770 | DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__, | 1768 | DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__, |
1771 | PAGE_SIZE >> 10, iovp_size >> 10); | 1769 | PAGE_SIZE >> 10, iovp_size >> 10); |
1772 | 1770 | ||
1773 | if (!ioc->name) { | 1771 | if (!ioc->name) { |
@@ -2137,7 +2135,7 @@ sba_page_override(char *str) | |||
2137 | break; | 2135 | break; |
2138 | default: | 2136 | default: |
2139 | printk("%s: unknown/unsupported iommu page size %ld\n", | 2137 | printk("%s: unknown/unsupported iommu page size %ld\n", |
2140 | __FUNCTION__, page_size); | 2138 | __func__, page_size); |
2141 | } | 2139 | } |
2142 | 2140 | ||
2143 | return 1; | 2141 | return 1; |
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 9898febf609a..969fe9f443c4 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -222,7 +222,7 @@ simeth_probe1(void) | |||
222 | } | 222 | } |
223 | 223 | ||
224 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) | 224 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) |
225 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | 225 | panic("%s: out of interrupt vectors!\n", __func__); |
226 | dev->irq = rc; | 226 | dev->irq = rc; |
227 | 227 | ||
228 | /* | 228 | /* |
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index ef252df50e1e..eb0c32a85fd7 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -1000,7 +1000,7 @@ simrs_init (void) | |||
1000 | if (!state->irq) { | 1000 | if (!state->irq) { |
1001 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) | 1001 | if ((rc = assign_irq_vector(AUTO_ASSIGN)) < 0) |
1002 | panic("%s: out of interrupt vectors!\n", | 1002 | panic("%s: out of interrupt vectors!\n", |
1003 | __FUNCTION__); | 1003 | __func__); |
1004 | state->irq = rc; | 1004 | state->irq = rc; |
1005 | ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); | 1005 | ia64_ssc_connect_irq(KEYBOARD_INTR, state->irq); |
1006 | } | 1006 | } |
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index d025a22eb225..b1bf51fe97b4 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -32,13 +32,8 @@ | |||
32 | #include <linux/shm.h> | 32 | #include <linux/shm.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/uio.h> | 34 | #include <linux/uio.h> |
35 | #include <linux/nfs_fs.h> | 35 | #include <linux/socket.h> |
36 | #include <linux/quota.h> | 36 | #include <linux/quota.h> |
37 | #include <linux/sunrpc/svc.h> | ||
38 | #include <linux/nfsd/nfsd.h> | ||
39 | #include <linux/nfsd/cache.h> | ||
40 | #include <linux/nfsd/xdr.h> | ||
41 | #include <linux/nfsd/syscall.h> | ||
42 | #include <linux/poll.h> | 37 | #include <linux/poll.h> |
43 | #include <linux/eventpoll.h> | 38 | #include <linux/eventpoll.h> |
44 | #include <linux/personality.h> | 39 | #include <linux/personality.h> |
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index f1cf2df97a2d..fbe742ad2fde 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -155,7 +155,7 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) | |||
155 | if (val == DIE_INIT_MONARCH_LEAVE) | 155 | if (val == DIE_INIT_MONARCH_LEAVE) |
156 | ia64_mca_printk(KERN_NOTICE | 156 | ia64_mca_printk(KERN_NOTICE |
157 | "%s: kdump not configured\n", | 157 | "%s: kdump not configured\n", |
158 | __FUNCTION__); | 158 | __func__); |
159 | return NOTIFY_DONE; | 159 | return NOTIFY_DONE; |
160 | } | 160 | } |
161 | 161 | ||
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 919070a9aed7..728d7247a1a6 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -379,8 +379,8 @@ efi_get_pal_addr (void) | |||
379 | * a dedicated ITR for the PAL code. | 379 | * a dedicated ITR for the PAL code. |
380 | */ | 380 | */ |
381 | if ((vaddr & mask) == (KERNEL_START & mask)) { | 381 | if ((vaddr & mask) == (KERNEL_START & mask)) { |
382 | printk(KERN_INFO "%s: no need to install ITR for " | 382 | printk(KERN_INFO "%s: no need to install ITR for PAL code\n", |
383 | "PAL code\n", __FUNCTION__); | 383 | __func__); |
384 | continue; | 384 | continue; |
385 | } | 385 | } |
386 | 386 | ||
@@ -399,7 +399,7 @@ efi_get_pal_addr (void) | |||
399 | return __va(md->phys_addr); | 399 | return __va(md->phys_addr); |
400 | } | 400 | } |
401 | printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", | 401 | printk(KERN_WARNING "%s: no PAL-code memory-descriptor found\n", |
402 | __FUNCTION__); | 402 | __func__); |
403 | return NULL; | 403 | return NULL; |
404 | } | 404 | } |
405 | 405 | ||
@@ -543,12 +543,30 @@ efi_init (void) | |||
543 | for (i = 0, p = efi_map_start; p < efi_map_end; | 543 | for (i = 0, p = efi_map_start; p < efi_map_end; |
544 | ++i, p += efi_desc_size) | 544 | ++i, p += efi_desc_size) |
545 | { | 545 | { |
546 | const char *unit; | ||
547 | unsigned long size; | ||
548 | |||
546 | md = p; | 549 | md = p; |
547 | printk("mem%02u: type=%u, attr=0x%lx, " | 550 | size = md->num_pages << EFI_PAGE_SHIFT; |
548 | "range=[0x%016lx-0x%016lx) (%luMB)\n", | 551 | |
552 | if ((size >> 40) > 0) { | ||
553 | size >>= 40; | ||
554 | unit = "TB"; | ||
555 | } else if ((size >> 30) > 0) { | ||
556 | size >>= 30; | ||
557 | unit = "GB"; | ||
558 | } else if ((size >> 20) > 0) { | ||
559 | size >>= 20; | ||
560 | unit = "MB"; | ||
561 | } else { | ||
562 | size >>= 10; | ||
563 | unit = "KB"; | ||
564 | } | ||
565 | |||
566 | printk("mem%02d: type=%2u, attr=0x%016lx, " | ||
567 | "range=[0x%016lx-0x%016lx) (%4lu%s)\n", | ||
549 | i, md->type, md->attribute, md->phys_addr, | 568 | i, md->type, md->attribute, md->phys_addr, |
550 | md->phys_addr + efi_md_size(md), | 569 | md->phys_addr + efi_md_size(md), size, unit); |
551 | md->num_pages >> (20 - EFI_PAGE_SHIFT)); | ||
552 | } | 570 | } |
553 | } | 571 | } |
554 | #endif | 572 | #endif |
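The new efi_init() hunk replaces the fixed megabyte calculation with an if/else ladder that scales each memory-descriptor size to the largest non-zero binary unit. A standalone sketch of the same arithmetic, in plain userspace C for illustration only:

#include <stdio.h>

static void print_size(unsigned long long size)		/* size in bytes */
{
	const char *unit;

	if (size >> 40) {
		size >>= 40; unit = "TB";
	} else if (size >> 30) {
		size >>= 30; unit = "GB";
	} else if (size >> 20) {
		size >>= 20; unit = "MB";
	} else {
		size >>= 10; unit = "KB";
	}
	printf("%4llu%s\n", size, unit);
}

int main(void)
{
	print_size(64ULL << 20);	/* "  64MB" */
	print_size(3ULL << 30);		/* "   3GB" */
	return 0;
}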
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 7b3292282dea..082c31dcfd99 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -534,7 +534,7 @@ iosapic_reassign_vector (int irq) | |||
534 | if (iosapic_intr_info[irq].count) { | 534 | if (iosapic_intr_info[irq].count) { |
535 | new_irq = create_irq(); | 535 | new_irq = create_irq(); |
536 | if (new_irq < 0) | 536 | if (new_irq < 0) |
537 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | 537 | panic("%s: out of interrupt vectors!\n", __func__); |
538 | printk(KERN_INFO "Reassigning vector %d to %d\n", | 538 | printk(KERN_INFO "Reassigning vector %d to %d\n", |
539 | irq_to_vector(irq), irq_to_vector(new_irq)); | 539 | irq_to_vector(irq), irq_to_vector(new_irq)); |
540 | memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq], | 540 | memcpy(&iosapic_intr_info[new_irq], &iosapic_intr_info[irq], |
@@ -599,7 +599,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, | |||
599 | index = find_iosapic(gsi); | 599 | index = find_iosapic(gsi); |
600 | if (index < 0) { | 600 | if (index < 0) { |
601 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", | 601 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", |
602 | __FUNCTION__, gsi); | 602 | __func__, gsi); |
603 | return -ENODEV; | 603 | return -ENODEV; |
604 | } | 604 | } |
605 | 605 | ||
@@ -608,7 +608,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, | |||
608 | rte = iosapic_alloc_rte(); | 608 | rte = iosapic_alloc_rte(); |
609 | if (!rte) { | 609 | if (!rte) { |
610 | printk(KERN_WARNING "%s: cannot allocate memory\n", | 610 | printk(KERN_WARNING "%s: cannot allocate memory\n", |
611 | __FUNCTION__); | 611 | __func__); |
612 | return -ENOMEM; | 612 | return -ENOMEM; |
613 | } | 613 | } |
614 | 614 | ||
@@ -625,7 +625,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, | |||
625 | (info->trigger != trigger || info->polarity != polarity)){ | 625 | (info->trigger != trigger || info->polarity != polarity)){ |
626 | printk (KERN_WARNING | 626 | printk (KERN_WARNING |
627 | "%s: cannot override the interrupt\n", | 627 | "%s: cannot override the interrupt\n", |
628 | __FUNCTION__); | 628 | __func__); |
629 | return -EINVAL; | 629 | return -EINVAL; |
630 | } | 630 | } |
631 | rte->refcnt++; | 631 | rte->refcnt++; |
@@ -647,7 +647,7 @@ register_intr (unsigned int gsi, int irq, unsigned char delivery, | |||
647 | if (idesc->chip != &no_irq_type) | 647 | if (idesc->chip != &no_irq_type) |
648 | printk(KERN_WARNING | 648 | printk(KERN_WARNING |
649 | "%s: changing vector %d from %s to %s\n", | 649 | "%s: changing vector %d from %s to %s\n", |
650 | __FUNCTION__, irq_to_vector(irq), | 650 | __func__, irq_to_vector(irq), |
651 | idesc->chip->name, irq_type->name); | 651 | idesc->chip->name, irq_type->name); |
652 | idesc->chip = irq_type; | 652 | idesc->chip = irq_type; |
653 | } | 653 | } |
@@ -920,7 +920,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, | |||
920 | case ACPI_INTERRUPT_INIT: | 920 | case ACPI_INTERRUPT_INIT: |
921 | irq = create_irq(); | 921 | irq = create_irq(); |
922 | if (irq < 0) | 922 | if (irq < 0) |
923 | panic("%s: out of interrupt vectors!\n", __FUNCTION__); | 923 | panic("%s: out of interrupt vectors!\n", __func__); |
924 | vector = irq_to_vector(irq); | 924 | vector = irq_to_vector(irq); |
925 | delivery = IOSAPIC_INIT; | 925 | delivery = IOSAPIC_INIT; |
926 | break; | 926 | break; |
@@ -931,7 +931,7 @@ iosapic_register_platform_intr (u32 int_type, unsigned int gsi, | |||
931 | mask = 1; | 931 | mask = 1; |
932 | break; | 932 | break; |
933 | default: | 933 | default: |
934 | printk(KERN_ERR "%s: invalid int type 0x%x\n", __FUNCTION__, | 934 | printk(KERN_ERR "%s: invalid int type 0x%x\n", __func__, |
935 | int_type); | 935 | int_type); |
936 | return -1; | 936 | return -1; |
937 | } | 937 | } |
@@ -996,7 +996,7 @@ iosapic_system_init (int system_pcat_compat) | |||
996 | */ | 996 | */ |
997 | printk(KERN_INFO | 997 | printk(KERN_INFO |
998 | "%s: Disabling PC-AT compatible 8259 interrupts\n", | 998 | "%s: Disabling PC-AT compatible 8259 interrupts\n", |
999 | __FUNCTION__); | 999 | __func__); |
1000 | outb(0xff, 0xA1); | 1000 | outb(0xff, 0xA1); |
1001 | outb(0xff, 0x21); | 1001 | outb(0xff, 0x21); |
1002 | } | 1002 | } |
@@ -1011,7 +1011,7 @@ iosapic_alloc (void) | |||
1011 | if (!iosapic_lists[index].addr) | 1011 | if (!iosapic_lists[index].addr) |
1012 | return index; | 1012 | return index; |
1013 | 1013 | ||
1014 | printk(KERN_WARNING "%s: failed to allocate iosapic\n", __FUNCTION__); | 1014 | printk(KERN_WARNING "%s: failed to allocate iosapic\n", __func__); |
1015 | return -1; | 1015 | return -1; |
1016 | } | 1016 | } |
1017 | 1017 | ||
@@ -1109,14 +1109,14 @@ iosapic_remove (unsigned int gsi_base) | |||
1109 | index = find_iosapic(gsi_base); | 1109 | index = find_iosapic(gsi_base); |
1110 | if (index < 0) { | 1110 | if (index < 0) { |
1111 | printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", | 1111 | printk(KERN_WARNING "%s: No IOSAPIC for GSI base %u\n", |
1112 | __FUNCTION__, gsi_base); | 1112 | __func__, gsi_base); |
1113 | goto out; | 1113 | goto out; |
1114 | } | 1114 | } |
1115 | 1115 | ||
1116 | if (iosapic_lists[index].rtes_inuse) { | 1116 | if (iosapic_lists[index].rtes_inuse) { |
1117 | err = -EBUSY; | 1117 | err = -EBUSY; |
1118 | printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n", | 1118 | printk(KERN_WARNING "%s: IOSAPIC for GSI base %u is busy\n", |
1119 | __FUNCTION__, gsi_base); | 1119 | __func__, gsi_base); |
1120 | goto out; | 1120 | goto out; |
1121 | } | 1121 | } |
1122 | 1122 | ||
@@ -1137,7 +1137,7 @@ map_iosapic_to_node(unsigned int gsi_base, int node) | |||
1137 | index = find_iosapic(gsi_base); | 1137 | index = find_iosapic(gsi_base); |
1138 | if (index < 0) { | 1138 | if (index < 0) { |
1139 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", | 1139 | printk(KERN_WARNING "%s: No IOSAPIC for GSI %u\n", |
1140 | __FUNCTION__, gsi_base); | 1140 | __func__, gsi_base); |
1141 | return; | 1141 | return; |
1142 | } | 1142 | } |
1143 | iosapic_lists[index].node = node; | 1143 | iosapic_lists[index].node = node; |
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 2b8cf6e85af4..d8be23fbe6bc 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -507,7 +507,7 @@ ia64_handle_irq (ia64_vector vector, struct pt_regs *regs) | |||
507 | if (unlikely(irq < 0)) { | 507 | if (unlikely(irq < 0)) { |
508 | printk(KERN_ERR "%s: Unexpected interrupt " | 508 | printk(KERN_ERR "%s: Unexpected interrupt " |
509 | "vector %d on CPU %d is not mapped " | 509 | "vector %d on CPU %d is not mapped " |
510 | "to any IRQ!\n", __FUNCTION__, vector, | 510 | "to any IRQ!\n", __func__, vector, |
511 | smp_processor_id()); | 511 | smp_processor_id()); |
512 | } else | 512 | } else |
513 | generic_handle_irq(irq); | 513 | generic_handle_irq(irq); |
@@ -572,7 +572,7 @@ void ia64_process_pending_intr(void) | |||
572 | if (unlikely(irq < 0)) { | 572 | if (unlikely(irq < 0)) { |
573 | printk(KERN_ERR "%s: Unexpected interrupt " | 573 | printk(KERN_ERR "%s: Unexpected interrupt " |
574 | "vector %d on CPU %d not being mapped " | 574 | "vector %d on CPU %d not being mapped " |
575 | "to any IRQ!!\n", __FUNCTION__, vector, | 575 | "to any IRQ!!\n", __func__, vector, |
576 | smp_processor_id()); | 576 | smp_processor_id()); |
577 | } else { | 577 | } else { |
578 | vectors_in_migration[irq]=0; | 578 | vectors_in_migration[irq]=0; |
@@ -666,11 +666,7 @@ ia64_send_ipi (int cpu, int vector, int delivery_mode, int redirect) | |||
666 | unsigned long ipi_data; | 666 | unsigned long ipi_data; |
667 | unsigned long phys_cpu_id; | 667 | unsigned long phys_cpu_id; |
668 | 668 | ||
669 | #ifdef CONFIG_SMP | ||
670 | phys_cpu_id = cpu_physical_id(cpu); | 669 | phys_cpu_id = cpu_physical_id(cpu); |
671 | #else | ||
672 | phys_cpu_id = (ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff; | ||
673 | #endif | ||
674 | 670 | ||
675 | /* | 671 | /* |
676 | * cpu number is in 8bit ID and 8bit EID | 672 | * cpu number is in 8bit ID and 8bit EID |
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 615c3d2b6348..8d9a446a0d17 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -838,7 +838,7 @@ out: | |||
838 | return 1; | 838 | return 1; |
839 | } | 839 | } |
840 | 840 | ||
841 | int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) | 841 | int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) |
842 | { | 842 | { |
843 | struct kprobe *cur = kprobe_running(); | 843 | struct kprobe *cur = kprobe_running(); |
844 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 844 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6e17aed53135..6c18221dba36 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -413,8 +413,8 @@ ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe) | |||
413 | IA64_LOG_INDEX_INC(sal_info_type); | 413 | IA64_LOG_INDEX_INC(sal_info_type); |
414 | IA64_LOG_UNLOCK(sal_info_type); | 414 | IA64_LOG_UNLOCK(sal_info_type); |
415 | if (irq_safe) { | 415 | if (irq_safe) { |
416 | IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. " | 416 | IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n", |
417 | "Record length = %ld\n", __FUNCTION__, sal_info_type, total_len); | 417 | __func__, sal_info_type, total_len); |
418 | } | 418 | } |
419 | *buffer = (u8 *) log_buffer; | 419 | *buffer = (u8 *) log_buffer; |
420 | return total_len; | 420 | return total_len; |
@@ -518,7 +518,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg) | |||
518 | static DEFINE_SPINLOCK(cpe_history_lock); | 518 | static DEFINE_SPINLOCK(cpe_history_lock); |
519 | 519 | ||
520 | IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", | 520 | IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", |
521 | __FUNCTION__, cpe_irq, smp_processor_id()); | 521 | __func__, cpe_irq, smp_processor_id()); |
522 | 522 | ||
523 | /* SAL spec states this should run w/ interrupts enabled */ | 523 | /* SAL spec states this should run w/ interrupts enabled */ |
524 | local_irq_enable(); | 524 | local_irq_enable(); |
@@ -594,7 +594,7 @@ ia64_mca_register_cpev (int cpev) | |||
594 | } | 594 | } |
595 | 595 | ||
596 | IA64_MCA_DEBUG("%s: corrected platform error " | 596 | IA64_MCA_DEBUG("%s: corrected platform error " |
597 | "vector %#x registered\n", __FUNCTION__, cpev); | 597 | "vector %#x registered\n", __func__, cpev); |
598 | } | 598 | } |
599 | #endif /* CONFIG_ACPI */ | 599 | #endif /* CONFIG_ACPI */ |
600 | 600 | ||
@@ -621,12 +621,11 @@ ia64_mca_cmc_vector_setup (void) | |||
621 | cmcv.cmcv_vector = IA64_CMC_VECTOR; | 621 | cmcv.cmcv_vector = IA64_CMC_VECTOR; |
622 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); | 622 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); |
623 | 623 | ||
624 | IA64_MCA_DEBUG("%s: CPU %d corrected " | 624 | IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n", |
625 | "machine check vector %#x registered.\n", | 625 | __func__, smp_processor_id(), IA64_CMC_VECTOR); |
626 | __FUNCTION__, smp_processor_id(), IA64_CMC_VECTOR); | ||
627 | 626 | ||
628 | IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", | 627 | IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n", |
629 | __FUNCTION__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); | 628 | __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV)); |
630 | } | 629 | } |
631 | 630 | ||
632 | /* | 631 | /* |
@@ -651,9 +650,8 @@ ia64_mca_cmc_vector_disable (void *dummy) | |||
651 | cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ | 650 | cmcv.cmcv_mask = 1; /* Mask/disable interrupt */ |
652 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); | 651 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); |
653 | 652 | ||
654 | IA64_MCA_DEBUG("%s: CPU %d corrected " | 653 | IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n", |
655 | "machine check vector %#x disabled.\n", | 654 | __func__, smp_processor_id(), cmcv.cmcv_vector); |
656 | __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); | ||
657 | } | 655 | } |
658 | 656 | ||
659 | /* | 657 | /* |
@@ -678,9 +676,8 @@ ia64_mca_cmc_vector_enable (void *dummy) | |||
678 | cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ | 676 | cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */ |
679 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); | 677 | ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval); |
680 | 678 | ||
681 | IA64_MCA_DEBUG("%s: CPU %d corrected " | 679 | IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n", |
682 | "machine check vector %#x enabled.\n", | 680 | __func__, smp_processor_id(), cmcv.cmcv_vector); |
683 | __FUNCTION__, smp_processor_id(), cmcv.cmcv_vector); | ||
684 | } | 681 | } |
685 | 682 | ||
686 | /* | 683 | /* |
@@ -767,7 +764,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) | |||
767 | local_irq_save(flags); | 764 | local_irq_save(flags); |
768 | if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(), | 765 | if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", get_irq_regs(), |
769 | (long)&nd, 0, 0) == NOTIFY_STOP) | 766 | (long)&nd, 0, 0) == NOTIFY_STOP) |
770 | ia64_mca_spin(__FUNCTION__); | 767 | ia64_mca_spin(__func__); |
771 | 768 | ||
772 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; | 769 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE; |
773 | /* Register with the SAL monarch that the slave has | 770 | /* Register with the SAL monarch that the slave has |
@@ -777,7 +774,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) | |||
777 | 774 | ||
778 | if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(), | 775 | if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", get_irq_regs(), |
779 | (long)&nd, 0, 0) == NOTIFY_STOP) | 776 | (long)&nd, 0, 0) == NOTIFY_STOP) |
780 | ia64_mca_spin(__FUNCTION__); | 777 | ia64_mca_spin(__func__); |
781 | 778 | ||
782 | /* Wait for the monarch cpu to exit. */ | 779 | /* Wait for the monarch cpu to exit. */ |
783 | while (monarch_cpu != -1) | 780 | while (monarch_cpu != -1) |
@@ -785,7 +782,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg) | |||
785 | 782 | ||
786 | if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(), | 783 | if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", get_irq_regs(), |
787 | (long)&nd, 0, 0) == NOTIFY_STOP) | 784 | (long)&nd, 0, 0) == NOTIFY_STOP) |
788 | ia64_mca_spin(__FUNCTION__); | 785 | ia64_mca_spin(__func__); |
789 | 786 | ||
790 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | 787 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; |
791 | /* Enable all interrupts */ | 788 | /* Enable all interrupts */ |
@@ -1230,7 +1227,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1230 | 1227 | ||
1231 | if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) | 1228 | if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0) |
1232 | == NOTIFY_STOP) | 1229 | == NOTIFY_STOP) |
1233 | ia64_mca_spin(__FUNCTION__); | 1230 | ia64_mca_spin(__func__); |
1234 | 1231 | ||
1235 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; | 1232 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA; |
1236 | if (sos->monarch) { | 1233 | if (sos->monarch) { |
@@ -1246,7 +1243,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1246 | ia64_mca_wakeup_all(); | 1243 | ia64_mca_wakeup_all(); |
1247 | if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0) | 1244 | if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0) |
1248 | == NOTIFY_STOP) | 1245 | == NOTIFY_STOP) |
1249 | ia64_mca_spin(__FUNCTION__); | 1246 | ia64_mca_spin(__func__); |
1250 | } else { | 1247 | } else { |
1251 | while (cpu_isset(cpu, mca_cpu)) | 1248 | while (cpu_isset(cpu, mca_cpu)) |
1252 | cpu_relax(); /* spin until monarch wakes us */ | 1249 | cpu_relax(); /* spin until monarch wakes us */ |
@@ -1276,7 +1273,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1276 | } | 1273 | } |
1277 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) | 1274 | if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover) |
1278 | == NOTIFY_STOP) | 1275 | == NOTIFY_STOP) |
1279 | ia64_mca_spin(__FUNCTION__); | 1276 | ia64_mca_spin(__func__); |
1280 | 1277 | ||
1281 | 1278 | ||
1282 | if (atomic_dec_return(&mca_count) > 0) { | 1279 | if (atomic_dec_return(&mca_count) > 0) { |
@@ -1328,7 +1325,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg) | |||
1328 | static DEFINE_SPINLOCK(cmc_history_lock); | 1325 | static DEFINE_SPINLOCK(cmc_history_lock); |
1329 | 1326 | ||
1330 | IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", | 1327 | IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n", |
1331 | __FUNCTION__, cmc_irq, smp_processor_id()); | 1328 | __func__, cmc_irq, smp_processor_id()); |
1332 | 1329 | ||
1333 | /* SAL spec states this should run w/ interrupts enabled */ | 1330 | /* SAL spec states this should run w/ interrupts enabled */ |
1334 | local_irq_enable(); | 1331 | local_irq_enable(); |
@@ -1614,7 +1611,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1614 | */ | 1611 | */ |
1615 | if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { | 1612 | if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) { |
1616 | mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", | 1613 | mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n", |
1617 | __FUNCTION__, cpu); | 1614 | __func__, cpu); |
1618 | atomic_dec(&slaves); | 1615 | atomic_dec(&slaves); |
1619 | sos->monarch = 1; | 1616 | sos->monarch = 1; |
1620 | } | 1617 | } |
@@ -1626,7 +1623,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1626 | */ | 1623 | */ |
1627 | if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { | 1624 | if (sos->monarch && atomic_add_return(1, &monarchs) > 1) { |
1628 | mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", | 1625 | mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n", |
1629 | __FUNCTION__, cpu); | 1626 | __func__, cpu); |
1630 | atomic_dec(&monarchs); | 1627 | atomic_dec(&monarchs); |
1631 | sos->monarch = 0; | 1628 | sos->monarch = 0; |
1632 | } | 1629 | } |
@@ -1637,15 +1634,15 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1637 | cpu_relax(); /* spin until monarch enters */ | 1634 | cpu_relax(); /* spin until monarch enters */ |
1638 | if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0) | 1635 | if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, (long)&nd, 0, 0) |
1639 | == NOTIFY_STOP) | 1636 | == NOTIFY_STOP) |
1640 | ia64_mca_spin(__FUNCTION__); | 1637 | ia64_mca_spin(__func__); |
1641 | if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0) | 1638 | if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, (long)&nd, 0, 0) |
1642 | == NOTIFY_STOP) | 1639 | == NOTIFY_STOP) |
1643 | ia64_mca_spin(__FUNCTION__); | 1640 | ia64_mca_spin(__func__); |
1644 | while (monarch_cpu != -1) | 1641 | while (monarch_cpu != -1) |
1645 | cpu_relax(); /* spin until monarch leaves */ | 1642 | cpu_relax(); /* spin until monarch leaves */ |
1646 | if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) | 1643 | if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, (long)&nd, 0, 0) |
1647 | == NOTIFY_STOP) | 1644 | == NOTIFY_STOP) |
1648 | ia64_mca_spin(__FUNCTION__); | 1645 | ia64_mca_spin(__func__); |
1649 | mprintk("Slave on cpu %d returning to normal service.\n", cpu); | 1646 | mprintk("Slave on cpu %d returning to normal service.\n", cpu); |
1650 | set_curr_task(cpu, previous_current); | 1647 | set_curr_task(cpu, previous_current); |
1651 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; | 1648 | ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE; |
@@ -1656,7 +1653,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1656 | monarch_cpu = cpu; | 1653 | monarch_cpu = cpu; |
1657 | if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0) | 1654 | if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, (long)&nd, 0, 0) |
1658 | == NOTIFY_STOP) | 1655 | == NOTIFY_STOP) |
1659 | ia64_mca_spin(__FUNCTION__); | 1656 | ia64_mca_spin(__func__); |
1660 | 1657 | ||
1661 | /* | 1658 | /* |
1662 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be | 1659 | * Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be |
@@ -1673,10 +1670,10 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, | |||
1673 | */ | 1670 | */ |
1674 | if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0) | 1671 | if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, (long)&nd, 0, 0) |
1675 | == NOTIFY_STOP) | 1672 | == NOTIFY_STOP) |
1676 | ia64_mca_spin(__FUNCTION__); | 1673 | ia64_mca_spin(__func__); |
1677 | if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0) | 1674 | if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, (long)&nd, 0, 0) |
1678 | == NOTIFY_STOP) | 1675 | == NOTIFY_STOP) |
1679 | ia64_mca_spin(__FUNCTION__); | 1676 | ia64_mca_spin(__func__); |
1680 | mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); | 1677 | mprintk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu); |
1681 | atomic_dec(&monarchs); | 1678 | atomic_dec(&monarchs); |
1682 | set_curr_task(cpu, previous_current); | 1679 | set_curr_task(cpu, previous_current); |
@@ -1884,7 +1881,7 @@ ia64_mca_init(void) | |||
1884 | .priority = 0/* we need to notified last */ | 1881 | .priority = 0/* we need to notified last */ |
1885 | }; | 1882 | }; |
1886 | 1883 | ||
1887 | IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__); | 1884 | IA64_MCA_DEBUG("%s: begin\n", __func__); |
1888 | 1885 | ||
1889 | /* Clear the Rendez checkin flag for all cpus */ | 1886 | /* Clear the Rendez checkin flag for all cpus */ |
1890 | for(i = 0 ; i < NR_CPUS; i++) | 1887 | for(i = 0 ; i < NR_CPUS; i++) |
@@ -1928,7 +1925,7 @@ ia64_mca_init(void) | |||
1928 | return; | 1925 | return; |
1929 | } | 1926 | } |
1930 | 1927 | ||
1931 | IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __FUNCTION__); | 1928 | IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__); |
1932 | 1929 | ||
1933 | ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); | 1930 | ia64_mc_info.imi_mca_handler = ia64_tpa(mca_hldlr_ptr->fp); |
1934 | /* | 1931 | /* |
@@ -1949,7 +1946,7 @@ ia64_mca_init(void) | |||
1949 | return; | 1946 | return; |
1950 | } | 1947 | } |
1951 | 1948 | ||
1952 | IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __FUNCTION__, | 1949 | IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__, |
1953 | ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); | 1950 | ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp)); |
1954 | 1951 | ||
1955 | /* | 1952 | /* |
@@ -1961,7 +1958,7 @@ ia64_mca_init(void) | |||
1961 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); | 1958 | ia64_mc_info.imi_slave_init_handler = ia64_tpa(init_hldlr_ptr_slave->fp); |
1962 | ia64_mc_info.imi_slave_init_handler_size = 0; | 1959 | ia64_mc_info.imi_slave_init_handler_size = 0; |
1963 | 1960 | ||
1964 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __FUNCTION__, | 1961 | IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__, |
1965 | ia64_mc_info.imi_monarch_init_handler); | 1962 | ia64_mc_info.imi_monarch_init_handler); |
1966 | 1963 | ||
1967 | /* Register the os init handler with SAL */ | 1964 | /* Register the os init handler with SAL */ |
@@ -1982,7 +1979,7 @@ ia64_mca_init(void) | |||
1982 | return; | 1979 | return; |
1983 | } | 1980 | } |
1984 | 1981 | ||
1985 | IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__); | 1982 | IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__); |
1986 | 1983 | ||
1987 | /* | 1984 | /* |
1988 | * Configure the CMCI/P vector and handler. Interrupts for CMC are | 1985 | * Configure the CMCI/P vector and handler. Interrupts for CMC are |
@@ -2042,7 +2039,7 @@ ia64_mca_late_init(void) | |||
2042 | cmc_polling_enabled = 0; | 2039 | cmc_polling_enabled = 0; |
2043 | schedule_work(&cmc_enable_work); | 2040 | schedule_work(&cmc_enable_work); |
2044 | 2041 | ||
2045 | IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __FUNCTION__); | 2042 | IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__); |
2046 | 2043 | ||
2047 | #ifdef CONFIG_ACPI | 2044 | #ifdef CONFIG_ACPI |
2048 | /* Setup the CPEI/P vector and handler */ | 2045 | /* Setup the CPEI/P vector and handler */ |
@@ -2065,17 +2062,17 @@ ia64_mca_late_init(void) | |||
2065 | ia64_cpe_irq = irq; | 2062 | ia64_cpe_irq = irq; |
2066 | ia64_mca_register_cpev(cpe_vector); | 2063 | ia64_mca_register_cpev(cpe_vector); |
2067 | IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", | 2064 | IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", |
2068 | __FUNCTION__); | 2065 | __func__); |
2069 | return 0; | 2066 | return 0; |
2070 | } | 2067 | } |
2071 | printk(KERN_ERR "%s: Failed to find irq for CPE " | 2068 | printk(KERN_ERR "%s: Failed to find irq for CPE " |
2072 | "interrupt handler, vector %d\n", | 2069 | "interrupt handler, vector %d\n", |
2073 | __FUNCTION__, cpe_vector); | 2070 | __func__, cpe_vector); |
2074 | } | 2071 | } |
2075 | /* If platform doesn't support CPEI, get the timer going. */ | 2072 | /* If platform doesn't support CPEI, get the timer going. */ |
2076 | if (cpe_poll_enabled) { | 2073 | if (cpe_poll_enabled) { |
2077 | ia64_mca_cpe_poll(0UL); | 2074 | ia64_mca_cpe_poll(0UL); |
2078 | IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __FUNCTION__); | 2075 | IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__); |
2079 | } | 2076 | } |
2080 | } | 2077 | } |
2081 | #endif | 2078 | #endif |
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index e58f4367cf11..e83e2ea3b3e0 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -493,7 +493,7 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings, | |||
493 | mod->arch.opd->sh_addralign = 8; | 493 | mod->arch.opd->sh_addralign = 8; |
494 | mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc); | 494 | mod->arch.opd->sh_size = fdescs * sizeof(struct fdesc); |
495 | DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n", | 495 | DEBUGP("%s: core.plt=%lx, init.plt=%lx, got=%lx, fdesc=%lx\n", |
496 | __FUNCTION__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size, | 496 | __func__, mod->arch.core_plt->sh_size, mod->arch.init_plt->sh_size, |
497 | mod->arch.got->sh_size, mod->arch.opd->sh_size); | 497 | mod->arch.got->sh_size, mod->arch.opd->sh_size); |
498 | return 0; | 498 | return 0; |
499 | } | 499 | } |
@@ -585,7 +585,7 @@ get_plt (struct module *mod, const struct insn *insn, uint64_t value, int *okp) | |||
585 | #if ARCH_MODULE_DEBUG | 585 | #if ARCH_MODULE_DEBUG |
586 | if (plt_target(plt) != target_ip) { | 586 | if (plt_target(plt) != target_ip) { |
587 | printk("%s: mistargeted PLT: wanted %lx, got %lx\n", | 587 | printk("%s: mistargeted PLT: wanted %lx, got %lx\n", |
588 | __FUNCTION__, target_ip, plt_target(plt)); | 588 | __func__, target_ip, plt_target(plt)); |
589 | *okp = 0; | 589 | *okp = 0; |
590 | return 0; | 590 | return 0; |
591 | } | 591 | } |
@@ -703,7 +703,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, | |||
703 | if (r_type == R_IA64_PCREL21BI) { | 703 | if (r_type == R_IA64_PCREL21BI) { |
704 | if (!is_internal(mod, val)) { | 704 | if (!is_internal(mod, val)) { |
705 | printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n", | 705 | printk(KERN_ERR "%s: %s reloc against non-local symbol (%lx)\n", |
706 | __FUNCTION__, reloc_name[r_type], val); | 706 | __func__, reloc_name[r_type], val); |
707 | return -ENOEXEC; | 707 | return -ENOEXEC; |
708 | } | 708 | } |
709 | format = RF_INSN21B; | 709 | format = RF_INSN21B; |
@@ -737,7 +737,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, | |||
737 | case R_IA64_LDXMOV: | 737 | case R_IA64_LDXMOV: |
738 | if (gp_addressable(mod, val)) { | 738 | if (gp_addressable(mod, val)) { |
739 | /* turn "ld8" into "mov": */ | 739 | /* turn "ld8" into "mov": */ |
740 | DEBUGP("%s: patching ld8 at %p to mov\n", __FUNCTION__, location); | 740 | DEBUGP("%s: patching ld8 at %p to mov\n", __func__, location); |
741 | ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL); | 741 | ia64_patch((u64) location, 0x1fff80fe000UL, 0x10000000000UL); |
742 | } | 742 | } |
743 | return 0; | 743 | return 0; |
@@ -771,7 +771,7 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend, | |||
771 | if (!ok) | 771 | if (!ok) |
772 | return -ENOEXEC; | 772 | return -ENOEXEC; |
773 | 773 | ||
774 | DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __FUNCTION__, location, val, | 774 | DEBUGP("%s: [%p]<-%016lx = %s(%lx)\n", __func__, location, val, |
775 | reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend); | 775 | reloc_name[r_type] ? reloc_name[r_type] : "?", sym->st_value + addend); |
776 | 776 | ||
777 | switch (format) { | 777 | switch (format) { |
@@ -807,7 +807,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind | |||
807 | Elf64_Shdr *target_sec; | 807 | Elf64_Shdr *target_sec; |
808 | int ret; | 808 | int ret; |
809 | 809 | ||
810 | DEBUGP("%s: applying section %u (%u relocs) to %u\n", __FUNCTION__, | 810 | DEBUGP("%s: applying section %u (%u relocs) to %u\n", __func__, |
811 | relsec, n, sechdrs[relsec].sh_info); | 811 | relsec, n, sechdrs[relsec].sh_info); |
812 | 812 | ||
813 | target_sec = sechdrs + sechdrs[relsec].sh_info; | 813 | target_sec = sechdrs + sechdrs[relsec].sh_info; |
@@ -835,7 +835,7 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind | |||
835 | gp = mod->core_size / 2; | 835 | gp = mod->core_size / 2; |
836 | gp = (uint64_t) mod->module_core + ((gp + 7) & -8); | 836 | gp = (uint64_t) mod->module_core + ((gp + 7) & -8); |
837 | mod->arch.gp = gp; | 837 | mod->arch.gp = gp; |
838 | DEBUGP("%s: placing gp at 0x%lx\n", __FUNCTION__, gp); | 838 | DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); |
839 | } | 839 | } |
840 | 840 | ||
841 | for (i = 0; i < n; i++) { | 841 | for (i = 0; i < n; i++) { |
@@ -903,7 +903,7 @@ register_unwind_table (struct module *mod) | |||
903 | init = start + num_core; | 903 | init = start + num_core; |
904 | } | 904 | } |
905 | 905 | ||
906 | DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __FUNCTION__, | 906 | DEBUGP("%s: name=%s, gp=%lx, num_init=%lu, num_core=%lu\n", __func__, |
907 | mod->name, mod->arch.gp, num_init, num_core); | 907 | mod->name, mod->arch.gp, num_init, num_core); |
908 | 908 | ||
909 | /* | 909 | /* |
@@ -912,13 +912,13 @@ register_unwind_table (struct module *mod) | |||
912 | if (num_core > 0) { | 912 | if (num_core > 0) { |
913 | mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, | 913 | mod->arch.core_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, |
914 | core, core + num_core); | 914 | core, core + num_core); |
915 | DEBUGP("%s: core: handle=%p [%p-%p)\n", __FUNCTION__, | 915 | DEBUGP("%s: core: handle=%p [%p-%p)\n", __func__, |
916 | mod->arch.core_unw_table, core, core + num_core); | 916 | mod->arch.core_unw_table, core, core + num_core); |
917 | } | 917 | } |
918 | if (num_init > 0) { | 918 | if (num_init > 0) { |
919 | mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, | 919 | mod->arch.init_unw_table = unw_add_unwind_table(mod->name, 0, mod->arch.gp, |
920 | init, init + num_init); | 920 | init, init + num_init); |
921 | DEBUGP("%s: init: handle=%p [%p-%p)\n", __FUNCTION__, | 921 | DEBUGP("%s: init: handle=%p [%p-%p)\n", __func__, |
922 | mod->arch.init_unw_table, init, init + num_init); | 922 | mod->arch.init_unw_table, init, init + num_init); |
923 | } | 923 | } |
924 | } | 924 | } |
@@ -926,7 +926,7 @@ register_unwind_table (struct module *mod) | |||
926 | int | 926 | int |
927 | module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) | 927 | module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) |
928 | { | 928 | { |
929 | DEBUGP("%s: init: entry=%p\n", __FUNCTION__, mod->init); | 929 | DEBUGP("%s: init: entry=%p\n", __func__, mod->init); |
930 | if (mod->arch.unwind) | 930 | if (mod->arch.unwind) |
931 | register_unwind_table(mod); | 931 | register_unwind_table(mod); |
932 | return 0; | 932 | return 0; |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f6b99719f10f..a2aabfdc80d9 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -227,12 +227,12 @@ | |||
227 | #ifdef PFM_DEBUGGING | 227 | #ifdef PFM_DEBUGGING |
228 | #define DPRINT(a) \ | 228 | #define DPRINT(a) \ |
229 | do { \ | 229 | do { \ |
230 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ | 230 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ |
231 | } while (0) | 231 | } while (0) |
232 | 232 | ||
233 | #define DPRINT_ovfl(a) \ | 233 | #define DPRINT_ovfl(a) \ |
234 | do { \ | 234 | do { \ |
235 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ | 235 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __func__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \ |
236 | } while (0) | 236 | } while (0) |
237 | #endif | 237 | #endif |
238 | 238 | ||
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index a7af1cb419f9..5f637bbfcccd 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -24,12 +24,12 @@ MODULE_LICENSE("GPL"); | |||
24 | #ifdef DEFAULT_DEBUG | 24 | #ifdef DEFAULT_DEBUG |
25 | #define DPRINT(a) \ | 25 | #define DPRINT(a) \ |
26 | do { \ | 26 | do { \ |
27 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ | 27 | if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \ |
28 | } while (0) | 28 | } while (0) |
29 | 29 | ||
30 | #define DPRINT_ovfl(a) \ | 30 | #define DPRINT_ovfl(a) \ |
31 | do { \ | 31 | do { \ |
32 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __FUNCTION__, __LINE__, smp_processor_id()); printk a; } \ | 32 | if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d ", __func__, __LINE__, smp_processor_id()); printk a; } \ |
33 | } while (0) | 33 | } while (0) |
34 | 34 | ||
35 | #else | 35 | #else |
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 331d6768b5d5..ab784ec4319d 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -698,52 +698,6 @@ thread_matches (struct task_struct *thread, unsigned long addr) | |||
698 | } | 698 | } |
699 | 699 | ||
700 | /* | 700 | /* |
701 | * GDB apparently wants to be able to read the register-backing store | ||
702 | * of any thread when attached to a given process. If we are peeking | ||
703 | * or poking an address that happens to reside in the kernel-backing | ||
704 | * store of another thread, we need to attach to that thread, because | ||
705 | * otherwise we end up accessing stale data. | ||
706 | * | ||
707 | * task_list_lock must be read-locked before calling this routine! | ||
708 | */ | ||
709 | static struct task_struct * | ||
710 | find_thread_for_addr (struct task_struct *child, unsigned long addr) | ||
711 | { | ||
712 | struct task_struct *p; | ||
713 | struct mm_struct *mm; | ||
714 | struct list_head *this, *next; | ||
715 | int mm_users; | ||
716 | |||
717 | if (!(mm = get_task_mm(child))) | ||
718 | return child; | ||
719 | |||
720 | /* -1 because of our get_task_mm(): */ | ||
721 | mm_users = atomic_read(&mm->mm_users) - 1; | ||
722 | if (mm_users <= 1) | ||
723 | goto out; /* not multi-threaded */ | ||
724 | |||
725 | /* | ||
726 | * Traverse the current process' children list. Every task that | ||
727 | * one attaches to becomes a child. And it is only attached children | ||
728 | * of the debugger that are of interest (ptrace_check_attach checks | ||
729 | * for this). | ||
730 | */ | ||
731 | list_for_each_safe(this, next, &current->children) { | ||
732 | p = list_entry(this, struct task_struct, sibling); | ||
733 | if (p->tgid != child->tgid) | ||
734 | continue; | ||
735 | if (thread_matches(p, addr)) { | ||
736 | child = p; | ||
737 | goto out; | ||
738 | } | ||
739 | } | ||
740 | |||
741 | out: | ||
742 | mmput(mm); | ||
743 | return child; | ||
744 | } | ||
745 | |||
746 | /* | ||
747 | * Write f32-f127 back to task->thread.fph if it has been modified. | 701 | * Write f32-f127 back to task->thread.fph if it has been modified. |
748 | */ | 702 | */ |
749 | inline void | 703 | inline void |
@@ -826,14 +780,14 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt, | |||
826 | if ((long)((unsigned long)child + IA64_STK_OFFSET - sp) | 780 | if ((long)((unsigned long)child + IA64_STK_OFFSET - sp) |
827 | < IA64_PT_REGS_SIZE) { | 781 | < IA64_PT_REGS_SIZE) { |
828 | dprintk("ptrace.%s: ran off the top of the kernel " | 782 | dprintk("ptrace.%s: ran off the top of the kernel " |
829 | "stack\n", __FUNCTION__); | 783 | "stack\n", __func__); |
830 | return; | 784 | return; |
831 | } | 785 | } |
832 | if (unw_get_pr (&prev_info, &pr) < 0) { | 786 | if (unw_get_pr (&prev_info, &pr) < 0) { |
833 | unw_get_rp(&prev_info, &ip); | 787 | unw_get_rp(&prev_info, &ip); |
834 | dprintk("ptrace.%s: failed to read " | 788 | dprintk("ptrace.%s: failed to read " |
835 | "predicate register (ip=0x%lx)\n", | 789 | "predicate register (ip=0x%lx)\n", |
836 | __FUNCTION__, ip); | 790 | __func__, ip); |
837 | return; | 791 | return; |
838 | } | 792 | } |
839 | if (unw_is_intr_frame(&info) | 793 | if (unw_is_intr_frame(&info) |
@@ -908,7 +862,7 @@ static int | |||
908 | access_uarea (struct task_struct *child, unsigned long addr, | 862 | access_uarea (struct task_struct *child, unsigned long addr, |
909 | unsigned long *data, int write_access) | 863 | unsigned long *data, int write_access) |
910 | { | 864 | { |
911 | unsigned long *ptr, regnum, urbs_end, rnat_addr, cfm; | 865 | unsigned long *ptr, regnum, urbs_end, cfm; |
912 | struct switch_stack *sw; | 866 | struct switch_stack *sw; |
913 | struct pt_regs *pt; | 867 | struct pt_regs *pt; |
914 | # define pt_reg_addr(pt, reg) ((void *) \ | 868 | # define pt_reg_addr(pt, reg) ((void *) \ |
@@ -1011,14 +965,9 @@ access_uarea (struct task_struct *child, unsigned long addr, | |||
1011 | * the kernel was entered. | 965 | * the kernel was entered. |
1012 | * | 966 | * |
1013 | * Furthermore, when changing the contents of | 967 | * Furthermore, when changing the contents of |
1014 | * PT_AR_BSP (or PT_CFM) we MUST copy any | 968 | * PT_AR_BSP (or PT_CFM) while the task is |
1015 | * users-level stacked registers that are | 969 | * blocked in a system call, convert the state |
1016 | * stored on the kernel stack back to | 970 | * so that the non-system-call exit |
1017 | * user-space because otherwise, we might end | ||
1018 | * up clobbering kernel stacked registers. | ||
1019 | * Also, if this happens while the task is | ||
1020 | * blocked in a system call, which convert the | ||
1021 | * state such that the non-system-call exit | ||
1022 | * path is used. This ensures that the proper | 971 | * path is used. This ensures that the proper |
1023 | * state will be picked up when resuming | 972 | * state will be picked up when resuming |
1024 | * execution. However, it *also* means that | 973 | * execution. However, it *also* means that |
@@ -1035,10 +984,6 @@ access_uarea (struct task_struct *child, unsigned long addr, | |||
1035 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | 984 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); |
1036 | if (write_access) { | 985 | if (write_access) { |
1037 | if (*data != urbs_end) { | 986 | if (*data != urbs_end) { |
1038 | if (ia64_sync_user_rbs(child, sw, | ||
1039 | pt->ar_bspstore, | ||
1040 | urbs_end) < 0) | ||
1041 | return -1; | ||
1042 | if (in_syscall(pt)) | 987 | if (in_syscall(pt)) |
1043 | convert_to_non_syscall(child, | 988 | convert_to_non_syscall(child, |
1044 | pt, | 989 | pt, |
@@ -1058,10 +1003,6 @@ access_uarea (struct task_struct *child, unsigned long addr, | |||
1058 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); | 1003 | urbs_end = ia64_get_user_rbs_end(child, pt, &cfm); |
1059 | if (write_access) { | 1004 | if (write_access) { |
1060 | if (((cfm ^ *data) & PFM_MASK) != 0) { | 1005 | if (((cfm ^ *data) & PFM_MASK) != 0) { |
1061 | if (ia64_sync_user_rbs(child, sw, | ||
1062 | pt->ar_bspstore, | ||
1063 | urbs_end) < 0) | ||
1064 | return -1; | ||
1065 | if (in_syscall(pt)) | 1006 | if (in_syscall(pt)) |
1066 | convert_to_non_syscall(child, | 1007 | convert_to_non_syscall(child, |
1067 | pt, | 1008 | pt, |
@@ -1093,16 +1034,8 @@ access_uarea (struct task_struct *child, unsigned long addr, | |||
1093 | return 0; | 1034 | return 0; |
1094 | 1035 | ||
1095 | case PT_AR_RNAT: | 1036 | case PT_AR_RNAT: |
1096 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | 1037 | ptr = pt_reg_addr(pt, ar_rnat); |
1097 | rnat_addr = (long) ia64_rse_rnat_addr((long *) | 1038 | break; |
1098 | urbs_end); | ||
1099 | if (write_access) | ||
1100 | return ia64_poke(child, sw, urbs_end, | ||
1101 | rnat_addr, *data); | ||
1102 | else | ||
1103 | return ia64_peek(child, sw, urbs_end, | ||
1104 | rnat_addr, data); | ||
1105 | |||
1106 | case PT_R1: | 1039 | case PT_R1: |
1107 | ptr = pt_reg_addr(pt, r1); | 1040 | ptr = pt_reg_addr(pt, r1); |
1108 | break; | 1041 | break; |
@@ -1521,215 +1454,97 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr) | |||
1521 | return ret; | 1454 | return ret; |
1522 | } | 1455 | } |
1523 | 1456 | ||
1524 | /* | ||
1525 | * Called by kernel/ptrace.c when detaching.. | ||
1526 | * | ||
1527 | * Make sure the single step bit is not set. | ||
1528 | */ | ||
1529 | void | 1457 | void |
1530 | ptrace_disable (struct task_struct *child) | 1458 | user_enable_single_step (struct task_struct *child) |
1531 | { | 1459 | { |
1532 | struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); | 1460 | struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); |
1533 | 1461 | ||
1534 | /* make sure the single step/taken-branch trap bits are not set: */ | 1462 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
1535 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | 1463 | child_psr->ss = 1; |
1536 | child_psr->ss = 0; | ||
1537 | child_psr->tb = 0; | ||
1538 | } | 1464 | } |
1539 | 1465 | ||
1540 | asmlinkage long | 1466 | void |
1541 | sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data) | 1467 | user_enable_block_step (struct task_struct *child) |
1542 | { | 1468 | { |
1543 | struct pt_regs *pt; | 1469 | struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); |
1544 | unsigned long urbs_end, peek_or_poke; | ||
1545 | struct task_struct *child; | ||
1546 | struct switch_stack *sw; | ||
1547 | long ret; | ||
1548 | struct unw_frame_info info; | ||
1549 | 1470 | ||
1550 | lock_kernel(); | 1471 | set_tsk_thread_flag(child, TIF_SINGLESTEP); |
1551 | ret = -EPERM; | 1472 | child_psr->tb = 1; |
1552 | if (request == PTRACE_TRACEME) { | 1473 | } |
1553 | ret = ptrace_traceme(); | ||
1554 | goto out; | ||
1555 | } | ||
1556 | 1474 | ||
1557 | peek_or_poke = (request == PTRACE_PEEKTEXT | 1475 | void |
1558 | || request == PTRACE_PEEKDATA | 1476 | user_disable_single_step (struct task_struct *child) |
1559 | || request == PTRACE_POKETEXT | 1477 | { |
1560 | || request == PTRACE_POKEDATA); | 1478 | struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child)); |
1561 | ret = -ESRCH; | ||
1562 | read_lock(&tasklist_lock); | ||
1563 | { | ||
1564 | child = find_task_by_pid(pid); | ||
1565 | if (child) { | ||
1566 | if (peek_or_poke) | ||
1567 | child = find_thread_for_addr(child, addr); | ||
1568 | get_task_struct(child); | ||
1569 | } | ||
1570 | } | ||
1571 | read_unlock(&tasklist_lock); | ||
1572 | if (!child) | ||
1573 | goto out; | ||
1574 | ret = -EPERM; | ||
1575 | if (pid == 1) /* no messing around with init! */ | ||
1576 | goto out_tsk; | ||
1577 | |||
1578 | if (request == PTRACE_ATTACH) { | ||
1579 | ret = ptrace_attach(child); | ||
1580 | if (!ret) | ||
1581 | arch_ptrace_attach(child); | ||
1582 | goto out_tsk; | ||
1583 | } | ||
1584 | 1479 | ||
1585 | ret = ptrace_check_attach(child, request == PTRACE_KILL); | 1480 | /* make sure the single step/taken-branch trap bits are not set: */ |
1586 | if (ret < 0) | 1481 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
1587 | goto out_tsk; | 1482 | child_psr->ss = 0; |
1483 | child_psr->tb = 0; | ||
1484 | } | ||
1588 | 1485 | ||
1589 | pt = task_pt_regs(child); | 1486 | /* |
1590 | sw = (struct switch_stack *) (child->thread.ksp + 16); | 1487 | * Called by kernel/ptrace.c when detaching.. |
1488 | * | ||
1489 | * Make sure the single step bit is not set. | ||
1490 | */ | ||
1491 | void | ||
1492 | ptrace_disable (struct task_struct *child) | ||
1493 | { | ||
1494 | user_disable_single_step(child); | ||
1495 | } | ||
1591 | 1496 | ||
1497 | long | ||
1498 | arch_ptrace (struct task_struct *child, long request, long addr, long data) | ||
1499 | { | ||
1592 | switch (request) { | 1500 | switch (request) { |
1593 | case PTRACE_PEEKTEXT: | 1501 | case PTRACE_PEEKTEXT: |
1594 | case PTRACE_PEEKDATA: | 1502 | case PTRACE_PEEKDATA: |
1595 | /* read word at location addr */ | 1503 | /* read word at location addr */ |
1596 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | 1504 | if (access_process_vm(child, addr, &data, sizeof(data), 0) |
1597 | ret = ia64_peek(child, sw, urbs_end, addr, &data); | 1505 | != sizeof(data)) |
1598 | if (ret == 0) { | 1506 | return -EIO; |
1599 | ret = data; | 1507 | /* ensure return value is not mistaken for error code */ |
1600 | /* ensure "ret" is not mistaken as an error code: */ | 1508 | force_successful_syscall_return(); |
1601 | force_successful_syscall_return(); | 1509 | return data; |
1602 | } | ||
1603 | goto out_tsk; | ||
1604 | |||
1605 | case PTRACE_POKETEXT: | ||
1606 | case PTRACE_POKEDATA: | ||
1607 | /* write the word at location addr */ | ||
1608 | urbs_end = ia64_get_user_rbs_end(child, pt, NULL); | ||
1609 | ret = ia64_poke(child, sw, urbs_end, addr, data); | ||
1610 | |||
1611 | /* Make sure user RBS has the latest data */ | ||
1612 | unw_init_from_blocked_task(&info, child); | ||
1613 | do_sync_rbs(&info, ia64_sync_user_rbs); | ||
1614 | 1510 | ||
1615 | goto out_tsk; | 1511 | /* PTRACE_POKETEXT and PTRACE_POKEDATA is handled |
1512 | * by the generic ptrace_request(). | ||
1513 | */ | ||
1616 | 1514 | ||
1617 | case PTRACE_PEEKUSR: | 1515 | case PTRACE_PEEKUSR: |
1618 | /* read the word at addr in the USER area */ | 1516 | /* read the word at addr in the USER area */ |
1619 | if (access_uarea(child, addr, &data, 0) < 0) { | 1517 | if (access_uarea(child, addr, &data, 0) < 0) |
1620 | ret = -EIO; | 1518 | return -EIO; |
1621 | goto out_tsk; | 1519 | /* ensure return value is not mistaken for error code */ |
1622 | } | ||
1623 | ret = data; | ||
1624 | /* ensure "ret" is not mistaken as an error code */ | ||
1625 | force_successful_syscall_return(); | 1520 | force_successful_syscall_return(); |
1626 | goto out_tsk; | 1521 | return data; |
1627 | 1522 | ||
1628 | case PTRACE_POKEUSR: | 1523 | case PTRACE_POKEUSR: |
1629 | /* write the word at addr in the USER area */ | 1524 | /* write the word at addr in the USER area */ |
1630 | if (access_uarea(child, addr, &data, 1) < 0) { | 1525 | if (access_uarea(child, addr, &data, 1) < 0) |
1631 | ret = -EIO; | 1526 | return -EIO; |
1632 | goto out_tsk; | 1527 | return 0; |
1633 | } | ||
1634 | ret = 0; | ||
1635 | goto out_tsk; | ||
1636 | 1528 | ||
1637 | case PTRACE_OLD_GETSIGINFO: | 1529 | case PTRACE_OLD_GETSIGINFO: |
1638 | /* for backwards-compatibility */ | 1530 | /* for backwards-compatibility */ |
1639 | ret = ptrace_request(child, PTRACE_GETSIGINFO, addr, data); | 1531 | return ptrace_request(child, PTRACE_GETSIGINFO, addr, data); |
1640 | goto out_tsk; | ||
1641 | 1532 | ||
1642 | case PTRACE_OLD_SETSIGINFO: | 1533 | case PTRACE_OLD_SETSIGINFO: |
1643 | /* for backwards-compatibility */ | 1534 | /* for backwards-compatibility */ |
1644 | ret = ptrace_request(child, PTRACE_SETSIGINFO, addr, data); | 1535 | return ptrace_request(child, PTRACE_SETSIGINFO, addr, data); |
1645 | goto out_tsk; | ||
1646 | |||
1647 | case PTRACE_SYSCALL: | ||
1648 | /* continue and stop at next (return from) syscall */ | ||
1649 | case PTRACE_CONT: | ||
1650 | /* restart after signal. */ | ||
1651 | ret = -EIO; | ||
1652 | if (!valid_signal(data)) | ||
1653 | goto out_tsk; | ||
1654 | if (request == PTRACE_SYSCALL) | ||
1655 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1656 | else | ||
1657 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1658 | child->exit_code = data; | ||
1659 | 1536 | ||
1660 | /* | 1537 | case PTRACE_GETREGS: |
1661 | * Make sure the single step/taken-branch trap bits | 1538 | return ptrace_getregs(child, |
1662 | * are not set: | 1539 | (struct pt_all_user_regs __user *) data); |
1663 | */ | ||
1664 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
1665 | ia64_psr(pt)->ss = 0; | ||
1666 | ia64_psr(pt)->tb = 0; | ||
1667 | 1540 | ||
1668 | wake_up_process(child); | 1541 | case PTRACE_SETREGS: |
1669 | ret = 0; | 1542 | return ptrace_setregs(child, |
1670 | goto out_tsk; | 1543 | (struct pt_all_user_regs __user *) data); |
1671 | 1544 | ||
1672 | case PTRACE_KILL: | 1545 | default: |
1673 | /* | 1546 | return ptrace_request(child, request, addr, data); |
1674 | * Make the child exit. Best I can do is send it a | ||
1675 | * sigkill. Perhaps it should be put in the status | ||
1676 | * that it wants to exit. | ||
1677 | */ | ||
1678 | if (child->exit_state == EXIT_ZOMBIE) | ||
1679 | /* already dead */ | ||
1680 | goto out_tsk; | ||
1681 | child->exit_code = SIGKILL; | ||
1682 | |||
1683 | ptrace_disable(child); | ||
1684 | wake_up_process(child); | ||
1685 | ret = 0; | ||
1686 | goto out_tsk; | ||
1687 | |||
1688 | case PTRACE_SINGLESTEP: | ||
1689 | /* let child execute for one instruction */ | ||
1690 | case PTRACE_SINGLEBLOCK: | ||
1691 | ret = -EIO; | ||
1692 | if (!valid_signal(data)) | ||
1693 | goto out_tsk; | ||
1694 | |||
1695 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
1696 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
1697 | if (request == PTRACE_SINGLESTEP) { | ||
1698 | ia64_psr(pt)->ss = 1; | ||
1699 | } else { | ||
1700 | ia64_psr(pt)->tb = 1; | ||
1701 | } | ||
1702 | child->exit_code = data; | ||
1703 | |||
1704 | /* give it a chance to run. */ | ||
1705 | wake_up_process(child); | ||
1706 | ret = 0; | ||
1707 | goto out_tsk; | ||
1708 | |||
1709 | case PTRACE_DETACH: | ||
1710 | /* detach a process that was attached. */ | ||
1711 | ret = ptrace_detach(child, data); | ||
1712 | goto out_tsk; | ||
1713 | |||
1714 | case PTRACE_GETREGS: | ||
1715 | ret = ptrace_getregs(child, | ||
1716 | (struct pt_all_user_regs __user *) data); | ||
1717 | goto out_tsk; | ||
1718 | |||
1719 | case PTRACE_SETREGS: | ||
1720 | ret = ptrace_setregs(child, | ||
1721 | (struct pt_all_user_regs __user *) data); | ||
1722 | goto out_tsk; | ||
1723 | |||
1724 | default: | ||
1725 | ret = ptrace_request(child, request, addr, data); | ||
1726 | goto out_tsk; | ||
1727 | } | 1547 | } |
1728 | out_tsk: | ||
1729 | put_task_struct(child); | ||
1730 | out: | ||
1731 | unlock_kernel(); | ||
1732 | return ret; | ||
1733 | } | 1548 | } |
1734 | 1549 | ||
1735 | 1550 | ||
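The ptrace.c conversion above removes the hand-rolled sys_ptrace() path (task lookup under tasklist_lock, find_thread_for_addr(), the out_tsk/out unwinding) and leaves an arch_ptrace() that services only the ia64-specific requests, deferring everything else to the generic ptrace_request(); single-step and taken-branch control moves into user_enable_single_step(), user_enable_block_step() and user_disable_single_step(). A condensed sketch of that dispatch shape, with simplified placeholder names rather than the kernel's real prototypes:

/* All names below are illustrative; only the structure mirrors the patch. */
enum request { PEEKUSR, POKEUSR, GETREGS, SETREGS, OTHER };

static long generic_ptrace_request(enum request req)   /* stands in for ptrace_request() */
{
        return 0;
}

static int access_user_area(long *data, int write)     /* stands in for access_uarea() */
{
        return 0;
}

long arch_dispatch(enum request req, long *data)
{
        switch (req) {
        case PEEKUSR:                   /* arch-specific: read a word from the USER area */
                return access_user_area(data, 0) < 0 ? -1 : *data;
        case POKEUSR:                   /* arch-specific: write a word to the USER area */
                return access_user_area(data, 1) < 0 ? -1 : 0;
        case GETREGS:                   /* arch-specific register-set transfers */
        case SETREGS:
                return 0;
        default:                        /* everything else is generic now */
                return generic_ptrace_request(req);
        }
}

The payoff is that locking, reference counting and request validation live in one place, kernel/ptrace.c, instead of being duplicated per architecture.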
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index ebd1a09f3201..4aa9eaea76c3 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -690,7 +690,7 @@ get_model_name(__u8 family, __u8 model) | |||
690 | if (overflow++ == 0) | 690 | if (overflow++ == 0) |
691 | printk(KERN_ERR | 691 | printk(KERN_ERR |
692 | "%s: Table overflow. Some processor model information will be missing\n", | 692 | "%s: Table overflow. Some processor model information will be missing\n", |
693 | __FUNCTION__); | 693 | __func__); |
694 | return "Unknown"; | 694 | return "Unknown"; |
695 | } | 695 | } |
696 | 696 | ||
@@ -785,7 +785,7 @@ get_max_cacheline_size (void) | |||
785 | status = ia64_pal_cache_summary(&levels, &unique_caches); | 785 | status = ia64_pal_cache_summary(&levels, &unique_caches); |
786 | if (status != 0) { | 786 | if (status != 0) { |
787 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", | 787 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n", |
788 | __FUNCTION__, status); | 788 | __func__, status); |
789 | max = SMP_CACHE_BYTES; | 789 | max = SMP_CACHE_BYTES; |
790 | /* Safest setup for "flush_icache_range()" */ | 790 | /* Safest setup for "flush_icache_range()" */ |
791 | ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; | 791 | ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT; |
@@ -798,7 +798,7 @@ get_max_cacheline_size (void) | |||
798 | if (status != 0) { | 798 | if (status != 0) { |
799 | printk(KERN_ERR | 799 | printk(KERN_ERR |
800 | "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", | 800 | "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n", |
801 | __FUNCTION__, l, status); | 801 | __func__, l, status); |
802 | max = SMP_CACHE_BYTES; | 802 | max = SMP_CACHE_BYTES; |
803 | /* The safest setup for "flush_icache_range()" */ | 803 | /* The safest setup for "flush_icache_range()" */ |
804 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; | 804 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; |
@@ -814,7 +814,7 @@ get_max_cacheline_size (void) | |||
814 | if (status != 0) { | 814 | if (status != 0) { |
815 | printk(KERN_ERR | 815 | printk(KERN_ERR |
816 | "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", | 816 | "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n", |
817 | __FUNCTION__, l, status); | 817 | __func__, l, status); |
818 | /* The safest setup for "flush_icache_range()" */ | 818 | /* The safest setup for "flush_icache_range()" */ |
819 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; | 819 | cci.pcci_stride = I_CACHE_STRIDE_SHIFT; |
820 | } | 820 | } |
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c index 52f70bbc192a..6903361d11a5 100644 --- a/arch/ia64/kernel/unaligned.c +++ b/arch/ia64/kernel/unaligned.c | |||
@@ -28,7 +28,7 @@ extern int die_if_kernel(char *str, struct pt_regs *regs, long err); | |||
28 | #undef DEBUG_UNALIGNED_TRAP | 28 | #undef DEBUG_UNALIGNED_TRAP |
29 | 29 | ||
30 | #ifdef DEBUG_UNALIGNED_TRAP | 30 | #ifdef DEBUG_UNALIGNED_TRAP |
31 | # define DPRINT(a...) do { printk("%s %u: ", __FUNCTION__, __LINE__); printk (a); } while (0) | 31 | # define DPRINT(a...) do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0) |
32 | # define DDUMP(str,vp,len) dump(str, vp, len) | 32 | # define DDUMP(str,vp,len) dump(str, vp, len) |
33 | 33 | ||
34 | static void | 34 | static void |
@@ -674,7 +674,7 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi | |||
674 | * just in case. | 674 | * just in case. |
675 | */ | 675 | */ |
676 | if (ld.x6_op == 1 || ld.x6_op == 3) { | 676 | if (ld.x6_op == 1 || ld.x6_op == 3) { |
677 | printk(KERN_ERR "%s: register update on speculative load, error\n", __FUNCTION__); | 677 | printk(KERN_ERR "%s: register update on speculative load, error\n", __func__); |
678 | if (die_if_kernel("unaligned reference on speculative load with register update\n", | 678 | if (die_if_kernel("unaligned reference on speculative load with register update\n", |
679 | regs, 30)) | 679 | regs, 30)) |
680 | return; | 680 | return; |
@@ -1104,7 +1104,7 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs | |||
1104 | */ | 1104 | */ |
1105 | if (ld.x6_op == 1 || ld.x6_op == 3) | 1105 | if (ld.x6_op == 1 || ld.x6_op == 3) |
1106 | printk(KERN_ERR "%s: register update on speculative load pair, error\n", | 1106 | printk(KERN_ERR "%s: register update on speculative load pair, error\n", |
1107 | __FUNCTION__); | 1107 | __func__); |
1108 | 1108 | ||
1109 | setreg(ld.r3, ifa, 0, regs); | 1109 | setreg(ld.r3, ifa, 0, regs); |
1110 | } | 1110 | } |
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c index c1bdb5131814..67810b77d998 100644 --- a/arch/ia64/kernel/unwind.c +++ b/arch/ia64/kernel/unwind.c | |||
@@ -257,7 +257,7 @@ pt_regs_off (unsigned long reg) | |||
257 | off = unw.pt_regs_offsets[reg]; | 257 | off = unw.pt_regs_offsets[reg]; |
258 | 258 | ||
259 | if (off < 0) { | 259 | if (off < 0) { |
260 | UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __FUNCTION__, reg); | 260 | UNW_DPRINT(0, "unwind.%s: bad scratch reg r%lu\n", __func__, reg); |
261 | off = 0; | 261 | off = 0; |
262 | } | 262 | } |
263 | return (unsigned long) off; | 263 | return (unsigned long) off; |
@@ -268,13 +268,13 @@ get_scratch_regs (struct unw_frame_info *info) | |||
268 | { | 268 | { |
269 | if (!info->pt) { | 269 | if (!info->pt) { |
270 | /* This should not happen with valid unwind info. */ | 270 | /* This should not happen with valid unwind info. */ |
271 | UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __FUNCTION__); | 271 | UNW_DPRINT(0, "unwind.%s: bad unwind info: resetting info->pt\n", __func__); |
272 | if (info->flags & UNW_FLAG_INTERRUPT_FRAME) | 272 | if (info->flags & UNW_FLAG_INTERRUPT_FRAME) |
273 | info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1); | 273 | info->pt = (unsigned long) ((struct pt_regs *) info->psp - 1); |
274 | else | 274 | else |
275 | info->pt = info->sp - 16; | 275 | info->pt = info->sp - 16; |
276 | } | 276 | } |
277 | UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __FUNCTION__, info->sp, info->pt); | 277 | UNW_DPRINT(3, "unwind.%s: sp 0x%lx pt 0x%lx\n", __func__, info->sp, info->pt); |
278 | return (struct pt_regs *) info->pt; | 278 | return (struct pt_regs *) info->pt; |
279 | } | 279 | } |
280 | 280 | ||
@@ -294,7 +294,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char | |||
294 | return 0; | 294 | return 0; |
295 | } | 295 | } |
296 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", | 296 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent r%u\n", |
297 | __FUNCTION__, regnum); | 297 | __func__, regnum); |
298 | return -1; | 298 | return -1; |
299 | } | 299 | } |
300 | 300 | ||
@@ -341,7 +341,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char | |||
341 | { | 341 | { |
342 | UNW_DPRINT(0, "unwind.%s: %p outside of regstk " | 342 | UNW_DPRINT(0, "unwind.%s: %p outside of regstk " |
343 | "[0x%lx-0x%lx)\n", | 343 | "[0x%lx-0x%lx)\n", |
344 | __FUNCTION__, (void *) addr, | 344 | __func__, (void *) addr, |
345 | info->regstk.limit, | 345 | info->regstk.limit, |
346 | info->regstk.top); | 346 | info->regstk.top); |
347 | return -1; | 347 | return -1; |
@@ -374,7 +374,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char | |||
374 | || (unsigned long) addr >= info->regstk.top) | 374 | || (unsigned long) addr >= info->regstk.top) |
375 | { | 375 | { |
376 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside " | 376 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to access register outside " |
377 | "of rbs\n", __FUNCTION__); | 377 | "of rbs\n", __func__); |
378 | return -1; | 378 | return -1; |
379 | } | 379 | } |
380 | if ((unsigned long) nat_addr >= info->regstk.top) | 380 | if ((unsigned long) nat_addr >= info->regstk.top) |
@@ -385,7 +385,7 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char | |||
385 | if (write) { | 385 | if (write) { |
386 | if (read_only(addr)) { | 386 | if (read_only(addr)) { |
387 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", | 387 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", |
388 | __FUNCTION__); | 388 | __func__); |
389 | } else { | 389 | } else { |
390 | *addr = *val; | 390 | *addr = *val; |
391 | if (*nat) | 391 | if (*nat) |
@@ -427,13 +427,13 @@ unw_access_br (struct unw_frame_info *info, int regnum, unsigned long *val, int | |||
427 | 427 | ||
428 | default: | 428 | default: |
429 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n", | 429 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent b%u\n", |
430 | __FUNCTION__, regnum); | 430 | __func__, regnum); |
431 | return -1; | 431 | return -1; |
432 | } | 432 | } |
433 | if (write) | 433 | if (write) |
434 | if (read_only(addr)) { | 434 | if (read_only(addr)) { |
435 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", | 435 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", |
436 | __FUNCTION__); | 436 | __func__); |
437 | } else | 437 | } else |
438 | *addr = *val; | 438 | *addr = *val; |
439 | else | 439 | else |
@@ -450,7 +450,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, | |||
450 | 450 | ||
451 | if ((unsigned) (regnum - 2) >= 126) { | 451 | if ((unsigned) (regnum - 2) >= 126) { |
452 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n", | 452 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent f%u\n", |
453 | __FUNCTION__, regnum); | 453 | __func__, regnum); |
454 | return -1; | 454 | return -1; |
455 | } | 455 | } |
456 | 456 | ||
@@ -482,7 +482,7 @@ unw_access_fr (struct unw_frame_info *info, int regnum, struct ia64_fpreg *val, | |||
482 | if (write) | 482 | if (write) |
483 | if (read_only(addr)) { | 483 | if (read_only(addr)) { |
484 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", | 484 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", |
485 | __FUNCTION__); | 485 | __func__); |
486 | } else | 486 | } else |
487 | *addr = *val; | 487 | *addr = *val; |
488 | else | 488 | else |
@@ -572,14 +572,14 @@ unw_access_ar (struct unw_frame_info *info, int regnum, unsigned long *val, int | |||
572 | 572 | ||
573 | default: | 573 | default: |
574 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", | 574 | UNW_DPRINT(0, "unwind.%s: trying to access non-existent ar%u\n", |
575 | __FUNCTION__, regnum); | 575 | __func__, regnum); |
576 | return -1; | 576 | return -1; |
577 | } | 577 | } |
578 | 578 | ||
579 | if (write) { | 579 | if (write) { |
580 | if (read_only(addr)) { | 580 | if (read_only(addr)) { |
581 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", | 581 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", |
582 | __FUNCTION__); | 582 | __func__); |
583 | } else | 583 | } else |
584 | *addr = *val; | 584 | *addr = *val; |
585 | } else | 585 | } else |
@@ -600,7 +600,7 @@ unw_access_pr (struct unw_frame_info *info, unsigned long *val, int write) | |||
600 | if (write) { | 600 | if (write) { |
601 | if (read_only(addr)) { | 601 | if (read_only(addr)) { |
602 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", | 602 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to write read-only location\n", |
603 | __FUNCTION__); | 603 | __func__); |
604 | } else | 604 | } else |
605 | *addr = *val; | 605 | *addr = *val; |
606 | } else | 606 | } else |
@@ -699,7 +699,7 @@ decode_abreg (unsigned char abreg, int memory) | |||
699 | default: | 699 | default: |
700 | break; | 700 | break; |
701 | } | 701 | } |
702 | UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __FUNCTION__, abreg); | 702 | UNW_DPRINT(0, "unwind.%s: bad abreg=0x%x\n", __func__, abreg); |
703 | return UNW_REG_LC; | 703 | return UNW_REG_LC; |
704 | } | 704 | } |
705 | 705 | ||
@@ -739,7 +739,7 @@ spill_next_when (struct unw_reg_info **regp, struct unw_reg_info *lim, unw_word | |||
739 | return; | 739 | return; |
740 | } | 740 | } |
741 | } | 741 | } |
742 | UNW_DPRINT(0, "unwind.%s: excess spill!\n", __FUNCTION__); | 742 | UNW_DPRINT(0, "unwind.%s: excess spill!\n", __func__); |
743 | } | 743 | } |
744 | 744 | ||
745 | static inline void | 745 | static inline void |
@@ -855,11 +855,11 @@ desc_abi (unsigned char abi, unsigned char context, struct unw_state_record *sr) | |||
855 | { | 855 | { |
856 | if (abi == 3 && context == 'i') { | 856 | if (abi == 3 && context == 'i') { |
857 | sr->flags |= UNW_FLAG_INTERRUPT_FRAME; | 857 | sr->flags |= UNW_FLAG_INTERRUPT_FRAME; |
858 | UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __FUNCTION__); | 858 | UNW_DPRINT(3, "unwind.%s: interrupt frame\n", __func__); |
859 | } | 859 | } |
860 | else | 860 | else |
861 | UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n", | 861 | UNW_DPRINT(0, "unwind%s: ignoring unwabi(abi=0x%x,context=0x%x)\n", |
862 | __FUNCTION__, abi, context); | 862 | __func__, abi, context); |
863 | } | 863 | } |
864 | 864 | ||
865 | static inline void | 865 | static inline void |
@@ -1347,7 +1347,7 @@ script_emit (struct unw_script *script, struct unw_insn insn) | |||
1347 | { | 1347 | { |
1348 | if (script->count >= UNW_MAX_SCRIPT_LEN) { | 1348 | if (script->count >= UNW_MAX_SCRIPT_LEN) { |
1349 | UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n", | 1349 | UNW_DPRINT(0, "unwind.%s: script exceeds maximum size of %u instructions!\n", |
1350 | __FUNCTION__, UNW_MAX_SCRIPT_LEN); | 1350 | __func__, UNW_MAX_SCRIPT_LEN); |
1351 | return; | 1351 | return; |
1352 | } | 1352 | } |
1353 | script->insn[script->count++] = insn; | 1353 | script->insn[script->count++] = insn; |
@@ -1389,7 +1389,7 @@ emit_nat_info (struct unw_state_record *sr, int i, struct unw_script *script) | |||
1389 | 1389 | ||
1390 | default: | 1390 | default: |
1391 | UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n", | 1391 | UNW_DPRINT(0, "unwind.%s: don't know how to emit nat info for where = %u\n", |
1392 | __FUNCTION__, r->where); | 1392 | __func__, r->where); |
1393 | return; | 1393 | return; |
1394 | } | 1394 | } |
1395 | insn.opc = opc; | 1395 | insn.opc = opc; |
@@ -1446,7 +1446,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) | |||
1446 | val = offsetof(struct pt_regs, f6) + 16*(rval - 6); | 1446 | val = offsetof(struct pt_regs, f6) + 16*(rval - 6); |
1447 | else | 1447 | else |
1448 | UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n", | 1448 | UNW_DPRINT(0, "unwind.%s: kernel may not touch f%lu\n", |
1449 | __FUNCTION__, rval); | 1449 | __func__, rval); |
1450 | } | 1450 | } |
1451 | break; | 1451 | break; |
1452 | 1452 | ||
@@ -1474,7 +1474,7 @@ compile_reg (struct unw_state_record *sr, int i, struct unw_script *script) | |||
1474 | 1474 | ||
1475 | default: | 1475 | default: |
1476 | UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n", | 1476 | UNW_DPRINT(0, "unwind%s: register %u has unexpected `where' value of %u\n", |
1477 | __FUNCTION__, i, r->where); | 1477 | __func__, i, r->where); |
1478 | break; | 1478 | break; |
1479 | } | 1479 | } |
1480 | insn.opc = opc; | 1480 | insn.opc = opc; |
@@ -1547,10 +1547,10 @@ build_script (struct unw_frame_info *info) | |||
1547 | r->when = UNW_WHEN_NEVER; | 1547 | r->when = UNW_WHEN_NEVER; |
1548 | sr.pr_val = info->pr; | 1548 | sr.pr_val = info->pr; |
1549 | 1549 | ||
1550 | UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __FUNCTION__, ip); | 1550 | UNW_DPRINT(3, "unwind.%s: ip 0x%lx\n", __func__, ip); |
1551 | script = script_new(ip); | 1551 | script = script_new(ip); |
1552 | if (!script) { | 1552 | if (!script) { |
1553 | UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __FUNCTION__); | 1553 | UNW_DPRINT(0, "unwind.%s: failed to create unwind script\n", __func__); |
1554 | STAT(unw.stat.script.build_time += ia64_get_itc() - start); | 1554 | STAT(unw.stat.script.build_time += ia64_get_itc() - start); |
1555 | return NULL; | 1555 | return NULL; |
1556 | } | 1556 | } |
@@ -1569,7 +1569,7 @@ build_script (struct unw_frame_info *info) | |||
1569 | if (!e) { | 1569 | if (!e) { |
1570 | /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ | 1570 | /* no info, return default unwinder (leaf proc, no mem stack, no saved regs) */ |
1571 | UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", | 1571 | UNW_DPRINT(1, "unwind.%s: no unwind info for ip=0x%lx (prev ip=0x%lx)\n", |
1572 | __FUNCTION__, ip, unw.cache[info->prev_script].ip); | 1572 | __func__, ip, unw.cache[info->prev_script].ip); |
1573 | sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; | 1573 | sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; |
1574 | sr.curr.reg[UNW_REG_RP].when = -1; | 1574 | sr.curr.reg[UNW_REG_RP].when = -1; |
1575 | sr.curr.reg[UNW_REG_RP].val = 0; | 1575 | sr.curr.reg[UNW_REG_RP].val = 0; |
@@ -1618,13 +1618,13 @@ build_script (struct unw_frame_info *info) | |||
1618 | sr.curr.reg[UNW_REG_RP].when = -1; | 1618 | sr.curr.reg[UNW_REG_RP].when = -1; |
1619 | sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; | 1619 | sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; |
1620 | UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n", | 1620 | UNW_DPRINT(1, "unwind.%s: using default for rp at ip=0x%lx where=%d val=0x%lx\n", |
1621 | __FUNCTION__, ip, sr.curr.reg[UNW_REG_RP].where, | 1621 | __func__, ip, sr.curr.reg[UNW_REG_RP].where, |
1622 | sr.curr.reg[UNW_REG_RP].val); | 1622 | sr.curr.reg[UNW_REG_RP].val); |
1623 | } | 1623 | } |
1624 | 1624 | ||
1625 | #ifdef UNW_DEBUG | 1625 | #ifdef UNW_DEBUG |
1626 | UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n", | 1626 | UNW_DPRINT(1, "unwind.%s: state record for func 0x%lx, t=%u:\n", |
1627 | __FUNCTION__, table->segment_base + e->start_offset, sr.when_target); | 1627 | __func__, table->segment_base + e->start_offset, sr.when_target); |
1628 | for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { | 1628 | for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { |
1629 | if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { | 1629 | if (r->where != UNW_WHERE_NONE || r->when != UNW_WHEN_NEVER) { |
1630 | UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); | 1630 | UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); |
@@ -1746,7 +1746,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state) | |||
1746 | } else { | 1746 | } else { |
1747 | s[dst] = 0; | 1747 | s[dst] = 0; |
1748 | UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n", | 1748 | UNW_DPRINT(0, "unwind.%s: no state->pt, dst=%ld, val=%ld\n", |
1749 | __FUNCTION__, dst, val); | 1749 | __func__, dst, val); |
1750 | } | 1750 | } |
1751 | break; | 1751 | break; |
1752 | 1752 | ||
@@ -1756,7 +1756,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state) | |||
1756 | else { | 1756 | else { |
1757 | s[dst] = 0; | 1757 | s[dst] = 0; |
1758 | UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n", | 1758 | UNW_DPRINT(0, "unwind.%s: UNW_INSN_MOVE_CONST bad val=%ld\n", |
1759 | __FUNCTION__, val); | 1759 | __func__, val); |
1760 | } | 1760 | } |
1761 | break; | 1761 | break; |
1762 | 1762 | ||
@@ -1791,7 +1791,7 @@ run_script (struct unw_script *script, struct unw_frame_info *state) | |||
1791 | || s[val] < TASK_SIZE) | 1791 | || s[val] < TASK_SIZE) |
1792 | { | 1792 | { |
1793 | UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n", | 1793 | UNW_DPRINT(0, "unwind.%s: rejecting bad psp=0x%lx\n", |
1794 | __FUNCTION__, s[val]); | 1794 | __func__, s[val]); |
1795 | break; | 1795 | break; |
1796 | } | 1796 | } |
1797 | #endif | 1797 | #endif |
@@ -1825,7 +1825,7 @@ find_save_locs (struct unw_frame_info *info) | |||
1825 | if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { | 1825 | if ((info->ip & (local_cpu_data->unimpl_va_mask | 0xf)) || info->ip < TASK_SIZE) { |
1826 | /* don't let obviously bad addresses pollute the cache */ | 1826 | /* don't let obviously bad addresses pollute the cache */ |
1827 | /* FIXME: should really be level 0 but it occurs too often. KAO */ | 1827 | /* FIXME: should really be level 0 but it occurs too often. KAO */ |
1828 | UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __FUNCTION__, info->ip); | 1828 | UNW_DPRINT(1, "unwind.%s: rejecting bad ip=0x%lx\n", __func__, info->ip); |
1829 | info->rp_loc = NULL; | 1829 | info->rp_loc = NULL; |
1830 | return -1; | 1830 | return -1; |
1831 | } | 1831 | } |
@@ -1838,7 +1838,7 @@ find_save_locs (struct unw_frame_info *info) | |||
1838 | spin_unlock_irqrestore(&unw.lock, flags); | 1838 | spin_unlock_irqrestore(&unw.lock, flags); |
1839 | UNW_DPRINT(0, | 1839 | UNW_DPRINT(0, |
1840 | "unwind.%s: failed to locate/build unwind script for ip %lx\n", | 1840 | "unwind.%s: failed to locate/build unwind script for ip %lx\n", |
1841 | __FUNCTION__, info->ip); | 1841 | __func__, info->ip); |
1842 | return -1; | 1842 | return -1; |
1843 | } | 1843 | } |
1844 | have_write_lock = 1; | 1844 | have_write_lock = 1; |
@@ -1882,21 +1882,21 @@ unw_unwind (struct unw_frame_info *info) | |||
1882 | if (!unw_valid(info, info->rp_loc)) { | 1882 | if (!unw_valid(info, info->rp_loc)) { |
1883 | /* FIXME: should really be level 0 but it occurs too often. KAO */ | 1883 | /* FIXME: should really be level 0 but it occurs too often. KAO */ |
1884 | UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n", | 1884 | UNW_DPRINT(1, "unwind.%s: failed to locate return link (ip=0x%lx)!\n", |
1885 | __FUNCTION__, info->ip); | 1885 | __func__, info->ip); |
1886 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1886 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1887 | return -1; | 1887 | return -1; |
1888 | } | 1888 | } |
1889 | /* restore the ip */ | 1889 | /* restore the ip */ |
1890 | ip = info->ip = *info->rp_loc; | 1890 | ip = info->ip = *info->rp_loc; |
1891 | if (ip < GATE_ADDR) { | 1891 | if (ip < GATE_ADDR) { |
1892 | UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __FUNCTION__, ip); | 1892 | UNW_DPRINT(2, "unwind.%s: reached user-space (ip=0x%lx)\n", __func__, ip); |
1893 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1893 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1894 | return -1; | 1894 | return -1; |
1895 | } | 1895 | } |
1896 | 1896 | ||
1897 | /* validate the previous stack frame pointer */ | 1897 | /* validate the previous stack frame pointer */ |
1898 | if (!unw_valid(info, info->pfs_loc)) { | 1898 | if (!unw_valid(info, info->pfs_loc)) { |
1899 | UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __FUNCTION__); | 1899 | UNW_DPRINT(0, "unwind.%s: failed to locate ar.pfs!\n", __func__); |
1900 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1900 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1901 | return -1; | 1901 | return -1; |
1902 | } | 1902 | } |
@@ -1912,13 +1912,13 @@ unw_unwind (struct unw_frame_info *info) | |||
1912 | num_regs = *info->cfm_loc & 0x7f; /* size of frame */ | 1912 | num_regs = *info->cfm_loc & 0x7f; /* size of frame */ |
1913 | info->pfs_loc = | 1913 | info->pfs_loc = |
1914 | (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); | 1914 | (unsigned long *) (info->pt + offsetof(struct pt_regs, ar_pfs)); |
1915 | UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __FUNCTION__, info->pt); | 1915 | UNW_DPRINT(3, "unwind.%s: interrupt_frame pt 0x%lx\n", __func__, info->pt); |
1916 | } else | 1916 | } else |
1917 | num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */ | 1917 | num_regs = (*info->cfm_loc >> 7) & 0x7f; /* size of locals */ |
1918 | info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); | 1918 | info->bsp = (unsigned long) ia64_rse_skip_regs((unsigned long *) info->bsp, -num_regs); |
1919 | if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { | 1919 | if (info->bsp < info->regstk.limit || info->bsp > info->regstk.top) { |
1920 | UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", | 1920 | UNW_DPRINT(0, "unwind.%s: bsp (0x%lx) out of range [0x%lx-0x%lx]\n", |
1921 | __FUNCTION__, info->bsp, info->regstk.limit, info->regstk.top); | 1921 | __func__, info->bsp, info->regstk.limit, info->regstk.top); |
1922 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1922 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1923 | return -1; | 1923 | return -1; |
1924 | } | 1924 | } |
@@ -1927,14 +1927,14 @@ unw_unwind (struct unw_frame_info *info) | |||
1927 | info->sp = info->psp; | 1927 | info->sp = info->psp; |
1928 | if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { | 1928 | if (info->sp < info->memstk.top || info->sp > info->memstk.limit) { |
1929 | UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n", | 1929 | UNW_DPRINT(0, "unwind.%s: sp (0x%lx) out of range [0x%lx-0x%lx]\n", |
1930 | __FUNCTION__, info->sp, info->memstk.top, info->memstk.limit); | 1930 | __func__, info->sp, info->memstk.top, info->memstk.limit); |
1931 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1931 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1932 | return -1; | 1932 | return -1; |
1933 | } | 1933 | } |
1934 | 1934 | ||
1935 | if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { | 1935 | if (info->ip == prev_ip && info->sp == prev_sp && info->bsp == prev_bsp) { |
1936 | UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n", | 1936 | UNW_DPRINT(0, "unwind.%s: ip, sp, bsp unchanged; stopping here (ip=0x%lx)\n", |
1937 | __FUNCTION__, ip); | 1937 | __func__, ip); |
1938 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); | 1938 | STAT(unw.stat.api.unwind_time += ia64_get_itc() - start; local_irq_restore(flags)); |
1939 | return -1; | 1939 | return -1; |
1940 | } | 1940 | } |
@@ -1961,7 +1961,7 @@ unw_unwind_to_user (struct unw_frame_info *info) | |||
1961 | if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) | 1961 | if ((long)((unsigned long)info->task + IA64_STK_OFFSET - sp) |
1962 | < IA64_PT_REGS_SIZE) { | 1962 | < IA64_PT_REGS_SIZE) { |
1963 | UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", | 1963 | UNW_DPRINT(0, "unwind.%s: ran off the top of the kernel stack\n", |
1964 | __FUNCTION__); | 1964 | __func__); |
1965 | break; | 1965 | break; |
1966 | } | 1966 | } |
1967 | if (unw_is_intr_frame(info) && | 1967 | if (unw_is_intr_frame(info) && |
@@ -1971,13 +1971,13 @@ unw_unwind_to_user (struct unw_frame_info *info) | |||
1971 | unw_get_rp(info, &ip); | 1971 | unw_get_rp(info, &ip); |
1972 | UNW_DPRINT(0, "unwind.%s: failed to read " | 1972 | UNW_DPRINT(0, "unwind.%s: failed to read " |
1973 | "predicate register (ip=0x%lx)\n", | 1973 | "predicate register (ip=0x%lx)\n", |
1974 | __FUNCTION__, ip); | 1974 | __func__, ip); |
1975 | return -1; | 1975 | return -1; |
1976 | } | 1976 | } |
1977 | } while (unw_unwind(info) >= 0); | 1977 | } while (unw_unwind(info) >= 0); |
1978 | unw_get_ip(info, &ip); | 1978 | unw_get_ip(info, &ip); |
1979 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", | 1979 | UNW_DPRINT(0, "unwind.%s: failed to unwind to user-level (ip=0x%lx)\n", |
1980 | __FUNCTION__, ip); | 1980 | __func__, ip); |
1981 | return -1; | 1981 | return -1; |
1982 | } | 1982 | } |
1983 | EXPORT_SYMBOL(unw_unwind_to_user); | 1983 | EXPORT_SYMBOL(unw_unwind_to_user); |
@@ -2028,7 +2028,7 @@ init_frame_info (struct unw_frame_info *info, struct task_struct *t, | |||
2028 | " pr 0x%lx\n" | 2028 | " pr 0x%lx\n" |
2029 | " sw 0x%lx\n" | 2029 | " sw 0x%lx\n" |
2030 | " sp 0x%lx\n", | 2030 | " sp 0x%lx\n", |
2031 | __FUNCTION__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, | 2031 | __func__, (unsigned long) t, rbslimit, rbstop, stktop, stklimit, |
2032 | info->pr, (unsigned long) info->sw, info->sp); | 2032 | info->pr, (unsigned long) info->sw, info->sp); |
2033 | STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); | 2033 | STAT(unw.stat.api.init_time += ia64_get_itc() - start; local_irq_restore(flags)); |
2034 | } | 2034 | } |
@@ -2047,7 +2047,7 @@ unw_init_frame_info (struct unw_frame_info *info, struct task_struct *t, struct | |||
2047 | " bsp 0x%lx\n" | 2047 | " bsp 0x%lx\n" |
2048 | " sol 0x%lx\n" | 2048 | " sol 0x%lx\n" |
2049 | " ip 0x%lx\n", | 2049 | " ip 0x%lx\n", |
2050 | __FUNCTION__, info->bsp, sol, info->ip); | 2050 | __func__, info->bsp, sol, info->ip); |
2051 | find_save_locs(info); | 2051 | find_save_locs(info); |
2052 | } | 2052 | } |
2053 | 2053 | ||
@@ -2058,7 +2058,7 @@ unw_init_from_blocked_task (struct unw_frame_info *info, struct task_struct *t) | |||
2058 | { | 2058 | { |
2059 | struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); | 2059 | struct switch_stack *sw = (struct switch_stack *) (t->thread.ksp + 16); |
2060 | 2060 | ||
2061 | UNW_DPRINT(1, "unwind.%s\n", __FUNCTION__); | 2061 | UNW_DPRINT(1, "unwind.%s\n", __func__); |
2062 | unw_init_frame_info(info, t, sw); | 2062 | unw_init_frame_info(info, t, sw); |
2063 | } | 2063 | } |
2064 | EXPORT_SYMBOL(unw_init_from_blocked_task); | 2064 | EXPORT_SYMBOL(unw_init_from_blocked_task); |
@@ -2088,7 +2088,7 @@ unw_add_unwind_table (const char *name, unsigned long segment_base, unsigned lon | |||
2088 | 2088 | ||
2089 | if (end - start <= 0) { | 2089 | if (end - start <= 0) { |
2090 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", | 2090 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to insert empty unwind table\n", |
2091 | __FUNCTION__); | 2091 | __func__); |
2092 | return NULL; | 2092 | return NULL; |
2093 | } | 2093 | } |
2094 | 2094 | ||
@@ -2119,14 +2119,14 @@ unw_remove_unwind_table (void *handle) | |||
2119 | 2119 | ||
2120 | if (!handle) { | 2120 | if (!handle) { |
2121 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", | 2121 | UNW_DPRINT(0, "unwind.%s: ignoring attempt to remove non-existent unwind table\n", |
2122 | __FUNCTION__); | 2122 | __func__); |
2123 | return; | 2123 | return; |
2124 | } | 2124 | } |
2125 | 2125 | ||
2126 | table = handle; | 2126 | table = handle; |
2127 | if (table == &unw.kernel_table) { | 2127 | if (table == &unw.kernel_table) { |
2128 | UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " | 2128 | UNW_DPRINT(0, "unwind.%s: sorry, freeing the kernel's unwind table is a " |
2129 | "no-can-do!\n", __FUNCTION__); | 2129 | "no-can-do!\n", __func__); |
2130 | return; | 2130 | return; |
2131 | } | 2131 | } |
2132 | 2132 | ||
@@ -2139,7 +2139,7 @@ unw_remove_unwind_table (void *handle) | |||
2139 | break; | 2139 | break; |
2140 | if (!prev) { | 2140 | if (!prev) { |
2141 | UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", | 2141 | UNW_DPRINT(0, "unwind.%s: failed to find unwind table %p\n", |
2142 | __FUNCTION__, (void *) table); | 2142 | __func__, (void *) table); |
2143 | spin_unlock_irqrestore(&unw.lock, flags); | 2143 | spin_unlock_irqrestore(&unw.lock, flags); |
2144 | return; | 2144 | return; |
2145 | } | 2145 | } |
@@ -2185,7 +2185,7 @@ create_gate_table (void) | |||
2185 | } | 2185 | } |
2186 | 2186 | ||
2187 | if (!punw) { | 2187 | if (!punw) { |
2188 | printk("%s: failed to find gate DSO's unwind table!\n", __FUNCTION__); | 2188 | printk("%s: failed to find gate DSO's unwind table!\n", __func__); |
2189 | return 0; | 2189 | return 0; |
2190 | } | 2190 | } |
2191 | 2191 | ||
@@ -2202,7 +2202,7 @@ create_gate_table (void) | |||
2202 | unw.gate_table = kmalloc(size, GFP_KERNEL); | 2202 | unw.gate_table = kmalloc(size, GFP_KERNEL); |
2203 | if (!unw.gate_table) { | 2203 | if (!unw.gate_table) { |
2204 | unw.gate_table_size = 0; | 2204 | unw.gate_table_size = 0; |
2205 | printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __FUNCTION__); | 2205 | printk(KERN_ERR "%s: unable to create unwind data for gate page!\n", __func__); |
2206 | return 0; | 2206 | return 0; |
2207 | } | 2207 | } |
2208 | unw.gate_table_size = size; | 2208 | unw.gate_table_size = size; |
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 3e69881648a3..23088bed111e 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -26,7 +26,7 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap) | |||
26 | if (!user_mode(regs)) { | 26 | if (!user_mode(regs)) { |
27 | /* kprobe_running() needs smp_processor_id() */ | 27 | /* kprobe_running() needs smp_processor_id() */ |
28 | preempt_disable(); | 28 | preempt_disable(); |
29 | if (kprobe_running() && kprobes_fault_handler(regs, trap)) | 29 | if (kprobe_running() && kprobe_fault_handler(regs, trap)) |
30 | ret = 1; | 30 | ret = 1; |
31 | preempt_enable(); | 31 | preempt_enable(); |
32 | } | 32 | } |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 25aef6211a54..a4ca657c72c6 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -714,7 +714,7 @@ int arch_add_memory(int nid, u64 start, u64 size) | |||
714 | 714 | ||
715 | if (ret) | 715 | if (ret) |
716 | printk("%s: Problem encountered in __add_pages() as ret=%d\n", | 716 | printk("%s: Problem encountered in __add_pages() as ret=%d\n", |
717 | __FUNCTION__, ret); | 717 | __func__, ret); |
718 | 718 | ||
719 | return ret; | 719 | return ret; |
720 | } | 720 | } |
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c index 245dc1fedc24..f5959c0c1810 100644 --- a/arch/ia64/pci/fixup.c +++ b/arch/ia64/pci/fixup.c | |||
@@ -63,7 +63,7 @@ static void __devinit pci_fixup_video(struct pci_dev *pdev) | |||
63 | pci_read_config_word(pdev, PCI_COMMAND, &config); | 63 | pci_read_config_word(pdev, PCI_COMMAND, &config); |
64 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { | 64 | if (config & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) { |
65 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; | 65 | pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW; |
66 | printk(KERN_DEBUG "Boot video device is %s\n", pci_name(pdev)); | 66 | dev_printk(KERN_DEBUG, &pdev->dev, "Boot video device\n"); |
67 | } | 67 | } |
68 | } | 68 | } |
69 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); | 69 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_video); |
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c index 8fd7e825192b..e282c348dcde 100644 --- a/arch/ia64/pci/pci.c +++ b/arch/ia64/pci/pci.c | |||
@@ -765,7 +765,7 @@ static void __init set_pci_cacheline_size(void) | |||
765 | status = ia64_pal_cache_summary(&levels, &unique_caches); | 765 | status = ia64_pal_cache_summary(&levels, &unique_caches); |
766 | if (status != 0) { | 766 | if (status != 0) { |
767 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " | 767 | printk(KERN_ERR "%s: ia64_pal_cache_summary() failed " |
768 | "(status=%ld)\n", __FUNCTION__, status); | 768 | "(status=%ld)\n", __func__, status); |
769 | return; | 769 | return; |
770 | } | 770 | } |
771 | 771 | ||
@@ -773,7 +773,7 @@ static void __init set_pci_cacheline_size(void) | |||
773 | /* cache_type (data_or_unified)= */ 2, &cci); | 773 | /* cache_type (data_or_unified)= */ 2, &cci); |
774 | if (status != 0) { | 774 | if (status != 0) { |
775 | printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " | 775 | printk(KERN_ERR "%s: ia64_pal_cache_config_info() failed " |
776 | "(status=%ld)\n", __FUNCTION__, status); | 776 | "(status=%ld)\n", __func__, status); |
777 | return; | 777 | return; |
778 | } | 778 | } |
779 | pci_cache_line_size = (1 << cci.pcci_line_size) / 4; | 779 | pci_cache_line_size = (1 << cci.pcci_line_size) / 4; |
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c index b663168da55c..0101c7924a4d 100644 --- a/arch/ia64/sn/kernel/huberror.c +++ b/arch/ia64/sn/kernel/huberror.c | |||
@@ -37,7 +37,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg) | |||
37 | (u64) nasid, 0, 0, 0, 0, 0, 0); | 37 | (u64) nasid, 0, 0, 0, 0, 0, 0); |
38 | 38 | ||
39 | if ((int)ret_stuff.v0) | 39 | if ((int)ret_stuff.v0) |
40 | panic("%s: Fatal %s Error", __FUNCTION__, | 40 | panic("%s: Fatal %s Error", __func__, |
41 | ((nasid & 1) ? "TIO" : "HUBII")); | 41 | ((nasid & 1) ? "TIO" : "HUBII")); |
42 | 42 | ||
43 | if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ | 43 | if (!(nasid & 1)) /* Not a TIO, handle CRB errors */ |
@@ -48,7 +48,7 @@ static irqreturn_t hub_eint_handler(int irq, void *arg) | |||
48 | (u64) nasid, 0, 0, 0, 0, 0, 0); | 48 | (u64) nasid, 0, 0, 0, 0, 0, 0); |
49 | 49 | ||
50 | if ((int)ret_stuff.v0) | 50 | if ((int)ret_stuff.v0) |
51 | panic("%s: Fatal TIO Error", __FUNCTION__); | 51 | panic("%s: Fatal TIO Error", __func__); |
52 | } else | 52 | } else |
53 | bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); | 53 | bte_error_handler((unsigned long)NODEPDA(nasid_to_cnodeid(nasid))); |
54 | 54 | ||
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c index 3c7178f5dce8..6568942a95f0 100644 --- a/arch/ia64/sn/kernel/io_acpi_init.c +++ b/arch/ia64/sn/kernel/io_acpi_init.c | |||
@@ -133,7 +133,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) | |||
133 | if (ACPI_FAILURE(status)) { | 133 | if (ACPI_FAILURE(status)) { |
134 | printk(KERN_ERR "%s: " | 134 | printk(KERN_ERR "%s: " |
135 | "acpi_get_vendor_resource() failed (0x%x) for: ", | 135 | "acpi_get_vendor_resource() failed (0x%x) for: ", |
136 | __FUNCTION__, status); | 136 | __func__, status); |
137 | acpi_ns_print_node_pathname(handle, NULL); | 137 | acpi_ns_print_node_pathname(handle, NULL); |
138 | printk("\n"); | 138 | printk("\n"); |
139 | return NULL; | 139 | return NULL; |
@@ -145,7 +145,7 @@ sn_get_bussoft_ptr(struct pci_bus *bus) | |||
145 | sizeof(struct pcibus_bussoft *)) { | 145 | sizeof(struct pcibus_bussoft *)) { |
146 | printk(KERN_ERR | 146 | printk(KERN_ERR |
147 | "%s: Invalid vendor data length %d\n", | 147 | "%s: Invalid vendor data length %d\n", |
148 | __FUNCTION__, vendor->byte_length); | 148 | __func__, vendor->byte_length); |
149 | kfree(buffer.pointer); | 149 | kfree(buffer.pointer); |
150 | return NULL; | 150 | return NULL; |
151 | } | 151 | } |
@@ -184,7 +184,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
184 | if (ACPI_FAILURE(status)) { | 184 | if (ACPI_FAILURE(status)) { |
185 | printk(KERN_ERR | 185 | printk(KERN_ERR |
186 | "%s: acpi_get_vendor_resource() failed (0x%x) for: ", | 186 | "%s: acpi_get_vendor_resource() failed (0x%x) for: ", |
187 | __FUNCTION__, status); | 187 | __func__, status); |
188 | acpi_ns_print_node_pathname(handle, NULL); | 188 | acpi_ns_print_node_pathname(handle, NULL); |
189 | printk("\n"); | 189 | printk("\n"); |
190 | return 1; | 190 | return 1; |
@@ -196,7 +196,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
196 | sizeof(struct pci_devdev_info *)) { | 196 | sizeof(struct pci_devdev_info *)) { |
197 | printk(KERN_ERR | 197 | printk(KERN_ERR |
198 | "%s: Invalid vendor data length: %d for: ", | 198 | "%s: Invalid vendor data length: %d for: ", |
199 | __FUNCTION__, vendor->byte_length); | 199 | __func__, vendor->byte_length); |
200 | acpi_ns_print_node_pathname(handle, NULL); | 200 | acpi_ns_print_node_pathname(handle, NULL); |
201 | printk("\n"); | 201 | printk("\n"); |
202 | ret = 1; | 202 | ret = 1; |
@@ -205,7 +205,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
205 | 205 | ||
206 | pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 206 | pcidev_ptr = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
207 | if (!pcidev_ptr) | 207 | if (!pcidev_ptr) |
208 | panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); | 208 | panic("%s: Unable to alloc memory for pcidev_info", __func__); |
209 | 209 | ||
210 | memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *)); | 210 | memcpy(&addr, vendor->byte_data, sizeof(struct pcidev_info *)); |
211 | pcidev_prom_ptr = __va(addr); | 211 | pcidev_prom_ptr = __va(addr); |
@@ -214,7 +214,7 @@ sn_extract_device_info(acpi_handle handle, struct pcidev_info **pcidev_info, | |||
214 | /* Get the IRQ info */ | 214 | /* Get the IRQ info */ |
215 | irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | 215 | irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); |
216 | if (!irq_info) | 216 | if (!irq_info) |
217 | panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); | 217 | panic("%s: Unable to alloc memory for sn_irq_info", __func__); |
218 | 218 | ||
219 | if (pcidev_ptr->pdi_sn_irq_info) { | 219 | if (pcidev_ptr->pdi_sn_irq_info) { |
220 | irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info); | 220 | irq_info_prom = __va(pcidev_ptr->pdi_sn_irq_info); |
@@ -249,10 +249,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
249 | status = acpi_get_parent(child, &parent); | 249 | status = acpi_get_parent(child, &parent); |
250 | if (ACPI_FAILURE(status)) { | 250 | if (ACPI_FAILURE(status)) { |
251 | printk(KERN_ERR "%s: acpi_get_parent() failed " | 251 | printk(KERN_ERR "%s: acpi_get_parent() failed " |
252 | "(0x%x) for: ", __FUNCTION__, status); | 252 | "(0x%x) for: ", __func__, status); |
253 | acpi_ns_print_node_pathname(child, NULL); | 253 | acpi_ns_print_node_pathname(child, NULL); |
254 | printk("\n"); | 254 | printk("\n"); |
255 | panic("%s: Unable to find host devfn\n", __FUNCTION__); | 255 | panic("%s: Unable to find host devfn\n", __func__); |
256 | } | 256 | } |
257 | if (parent == rootbus_handle) | 257 | if (parent == rootbus_handle) |
258 | break; | 258 | break; |
@@ -260,7 +260,7 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
260 | } | 260 | } |
261 | if (!child) { | 261 | if (!child) { |
262 | printk(KERN_ERR "%s: Unable to find root bus for: ", | 262 | printk(KERN_ERR "%s: Unable to find root bus for: ", |
263 | __FUNCTION__); | 263 | __func__); |
264 | acpi_ns_print_node_pathname(device_handle, NULL); | 264 | acpi_ns_print_node_pathname(device_handle, NULL); |
265 | printk("\n"); | 265 | printk("\n"); |
266 | BUG(); | 266 | BUG(); |
@@ -269,10 +269,10 @@ get_host_devfn(acpi_handle device_handle, acpi_handle rootbus_handle) | |||
269 | status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); | 269 | status = acpi_evaluate_integer(child, METHOD_NAME__ADR, NULL, &adr); |
270 | if (ACPI_FAILURE(status)) { | 270 | if (ACPI_FAILURE(status)) { |
271 | printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", | 271 | printk(KERN_ERR "%s: Unable to get _ADR (0x%x) for: ", |
272 | __FUNCTION__, status); | 272 | __func__, status); |
273 | acpi_ns_print_node_pathname(child, NULL); | 273 | acpi_ns_print_node_pathname(child, NULL); |
274 | printk("\n"); | 274 | printk("\n"); |
275 | panic("%s: Unable to find host devfn\n", __FUNCTION__); | 275 | panic("%s: Unable to find host devfn\n", __func__); |
276 | } | 276 | } |
277 | 277 | ||
278 | slot = (adr >> 16) & 0xffff; | 278 | slot = (adr >> 16) & 0xffff; |
@@ -308,7 +308,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
308 | if (ACPI_FAILURE(status)) { | 308 | if (ACPI_FAILURE(status)) { |
309 | printk(KERN_ERR | 309 | printk(KERN_ERR |
310 | "%s: acpi_get_parent() failed (0x%x) for: ", | 310 | "%s: acpi_get_parent() failed (0x%x) for: ", |
311 | __FUNCTION__, status); | 311 | __func__, status); |
312 | acpi_ns_print_node_pathname(handle, NULL); | 312 | acpi_ns_print_node_pathname(handle, NULL); |
313 | printk("\n"); | 313 | printk("\n"); |
314 | return AE_OK; | 314 | return AE_OK; |
@@ -318,7 +318,7 @@ find_matching_device(acpi_handle handle, u32 lvl, void *context, void **rv) | |||
318 | if (ACPI_FAILURE(status)) { | 318 | if (ACPI_FAILURE(status)) { |
319 | printk(KERN_ERR | 319 | printk(KERN_ERR |
320 | "%s: Failed to find _BBN in parent of: ", | 320 | "%s: Failed to find _BBN in parent of: ", |
321 | __FUNCTION__); | 321 | __func__); |
322 | acpi_ns_print_node_pathname(handle, NULL); | 322 | acpi_ns_print_node_pathname(handle, NULL); |
323 | printk("\n"); | 323 | printk("\n"); |
324 | return AE_OK; | 324 | return AE_OK; |
@@ -358,14 +358,14 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | |||
358 | if (segment != pci_domain_nr(dev)) { | 358 | if (segment != pci_domain_nr(dev)) { |
359 | printk(KERN_ERR | 359 | printk(KERN_ERR |
360 | "%s: Segment number mismatch, 0x%lx vs 0x%x for: ", | 360 | "%s: Segment number mismatch, 0x%lx vs 0x%x for: ", |
361 | __FUNCTION__, segment, pci_domain_nr(dev)); | 361 | __func__, segment, pci_domain_nr(dev)); |
362 | acpi_ns_print_node_pathname(rootbus_handle, NULL); | 362 | acpi_ns_print_node_pathname(rootbus_handle, NULL); |
363 | printk("\n"); | 363 | printk("\n"); |
364 | return 1; | 364 | return 1; |
365 | } | 365 | } |
366 | } else { | 366 | } else { |
367 | printk(KERN_ERR "%s: Unable to get __SEG from: ", | 367 | printk(KERN_ERR "%s: Unable to get __SEG from: ", |
368 | __FUNCTION__); | 368 | __func__); |
369 | acpi_ns_print_node_pathname(rootbus_handle, NULL); | 369 | acpi_ns_print_node_pathname(rootbus_handle, NULL); |
370 | printk("\n"); | 370 | printk("\n"); |
371 | return 1; | 371 | return 1; |
@@ -386,7 +386,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info, | |||
386 | if (!pcidev_match.handle) { | 386 | if (!pcidev_match.handle) { |
387 | printk(KERN_ERR | 387 | printk(KERN_ERR |
388 | "%s: Could not find matching ACPI device for %s.\n", | 388 | "%s: Could not find matching ACPI device for %s.\n", |
389 | __FUNCTION__, pci_name(dev)); | 389 | __func__, pci_name(dev)); |
390 | return 1; | 390 | return 1; |
391 | } | 391 | } |
392 | 392 | ||
@@ -422,7 +422,7 @@ sn_acpi_slot_fixup(struct pci_dev *dev) | |||
422 | 422 | ||
423 | if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { | 423 | if (sn_acpi_get_pcidev_info(dev, &pcidev_info, &sn_irq_info)) { |
424 | panic("%s: Failure obtaining pcidev_info for %s\n", | 424 | panic("%s: Failure obtaining pcidev_info for %s\n", |
425 | __FUNCTION__, pci_name(dev)); | 425 | __func__, pci_name(dev)); |
426 | } | 426 | } |
427 | 427 | ||
428 | if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { | 428 | if (pcidev_info->pdi_pio_mapped_addr[PCI_ROM_RESOURCE]) { |
@@ -463,7 +463,7 @@ sn_acpi_bus_fixup(struct pci_bus *bus) | |||
463 | printk(KERN_ERR | 463 | printk(KERN_ERR |
464 | "%s: 0x%04x:0x%02x Unable to " | 464 | "%s: 0x%04x:0x%02x Unable to " |
465 | "obtain prom_bussoft_ptr\n", | 465 | "obtain prom_bussoft_ptr\n", |
466 | __FUNCTION__, pci_domain_nr(bus), bus->number); | 466 | __func__, pci_domain_nr(bus), bus->number); |
467 | return; | 467 | return; |
468 | } | 468 | } |
469 | sn_common_bus_fixup(bus, prom_bussoft_ptr); | 469 | sn_common_bus_fixup(bus, prom_bussoft_ptr); |
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c index c4eb84f9e781..8a924a5661dd 100644 --- a/arch/ia64/sn/kernel/io_common.c +++ b/arch/ia64/sn/kernel/io_common.c | |||
@@ -364,7 +364,7 @@ void sn_bus_store_sysdata(struct pci_dev *dev) | |||
364 | 364 | ||
365 | element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); | 365 | element = kzalloc(sizeof(struct sysdata_el), GFP_KERNEL); |
366 | if (!element) { | 366 | if (!element) { |
367 | dev_dbg(&dev->dev, "%s: out of memory!\n", __FUNCTION__); | 367 | dev_dbg(&dev->dev, "%s: out of memory!\n", __func__); |
368 | return; | 368 | return; |
369 | } | 369 | } |
370 | element->sysdata = SN_PCIDEV_INFO(dev); | 370 | element->sysdata = SN_PCIDEV_INFO(dev); |
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index 906b93674b76..c3aa851d1ca6 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c | |||
@@ -209,11 +209,11 @@ sn_io_slot_fixup(struct pci_dev *dev) | |||
209 | 209 | ||
210 | pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); | 210 | pcidev_info = kzalloc(sizeof(struct pcidev_info), GFP_KERNEL); |
211 | if (!pcidev_info) | 211 | if (!pcidev_info) |
212 | panic("%s: Unable to alloc memory for pcidev_info", __FUNCTION__); | 212 | panic("%s: Unable to alloc memory for pcidev_info", __func__); |
213 | 213 | ||
214 | sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); | 214 | sn_irq_info = kzalloc(sizeof(struct sn_irq_info), GFP_KERNEL); |
215 | if (!sn_irq_info) | 215 | if (!sn_irq_info) |
216 | panic("%s: Unable to alloc memory for sn_irq_info", __FUNCTION__); | 216 | panic("%s: Unable to alloc memory for sn_irq_info", __func__); |
217 | 217 | ||
218 | /* Call to retrieve pci device information needed by kernel. */ | 218 | /* Call to retrieve pci device information needed by kernel. */ |
219 | status = sal_get_pcidev_info((u64) pci_domain_nr(dev), | 219 | status = sal_get_pcidev_info((u64) pci_domain_nr(dev), |
diff --git a/arch/ia64/sn/kernel/mca.c b/arch/ia64/sn/kernel/mca.c index 868c9aa64fe2..27793f7aa99c 100644 --- a/arch/ia64/sn/kernel/mca.c +++ b/arch/ia64/sn/kernel/mca.c | |||
@@ -100,7 +100,7 @@ sn_platform_plat_specific_err_print(const u8 * sect_header, u8 ** oemdata, | |||
100 | if (!newbuf) { | 100 | if (!newbuf) { |
101 | mutex_unlock(&sn_oemdata_mutex); | 101 | mutex_unlock(&sn_oemdata_mutex); |
102 | printk(KERN_ERR "%s: unable to extend sn_oemdata\n", | 102 | printk(KERN_ERR "%s: unable to extend sn_oemdata\n", |
103 | __FUNCTION__); | 103 | __func__); |
104 | return 1; | 104 | return 1; |
105 | } | 105 | } |
106 | vfree(*sn_oemdata); | 106 | vfree(*sn_oemdata); |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 511db2fd7bff..18b94b792d54 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -116,7 +116,7 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size, | |||
116 | *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, | 116 | *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size, |
117 | SN_DMA_ADDR_PHYS); | 117 | SN_DMA_ADDR_PHYS); |
118 | if (!*dma_handle) { | 118 | if (!*dma_handle) { |
119 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 119 | printk(KERN_ERR "%s: out of ATEs\n", __func__); |
120 | free_pages((unsigned long)cpuaddr, get_order(size)); | 120 | free_pages((unsigned long)cpuaddr, get_order(size)); |
121 | return NULL; | 121 | return NULL; |
122 | } | 122 | } |
@@ -179,7 +179,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, | |||
179 | phys_addr = __pa(cpu_addr); | 179 | phys_addr = __pa(cpu_addr); |
180 | dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); | 180 | dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); |
181 | if (!dma_addr) { | 181 | if (!dma_addr) { |
182 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 182 | printk(KERN_ERR "%s: out of ATEs\n", __func__); |
183 | return 0; | 183 | return 0; |
184 | } | 184 | } |
185 | return dma_addr; | 185 | return dma_addr; |
@@ -266,7 +266,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | |||
266 | SN_DMA_ADDR_PHYS); | 266 | SN_DMA_ADDR_PHYS); |
267 | 267 | ||
268 | if (!sg->dma_address) { | 268 | if (!sg->dma_address) { |
269 | printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__); | 269 | printk(KERN_ERR "%s: out of ATEs\n", __func__); |
270 | 270 | ||
271 | /* | 271 | /* |
272 | * Free any successfully allocated entries. | 272 | * Free any successfully allocated entries. |
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c index ef048a674772..529462c01570 100644 --- a/arch/ia64/sn/pci/tioca_provider.c +++ b/arch/ia64/sn/pci/tioca_provider.c | |||
@@ -88,7 +88,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) | |||
88 | break; | 88 | break; |
89 | default: | 89 | default: |
90 | printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " | 90 | printk(KERN_ERR "%s: Invalid CA_APERATURE_SIZE " |
91 | "0x%lx\n", __FUNCTION__, (ulong) CA_APERATURE_SIZE); | 91 | "0x%lx\n", __func__, (ulong) CA_APERATURE_SIZE); |
92 | return -1; | 92 | return -1; |
93 | } | 93 | } |
94 | 94 | ||
@@ -124,7 +124,7 @@ tioca_gart_init(struct tioca_kernel *tioca_kern) | |||
124 | if (!tmp) { | 124 | if (!tmp) { |
125 | printk(KERN_ERR "%s: Could not allocate " | 125 | printk(KERN_ERR "%s: Could not allocate " |
126 | "%lu bytes (order %d) for GART\n", | 126 | "%lu bytes (order %d) for GART\n", |
127 | __FUNCTION__, | 127 | __func__, |
128 | tioca_kern->ca_gart_size, | 128 | tioca_kern->ca_gart_size, |
129 | get_order(tioca_kern->ca_gart_size)); | 129 | get_order(tioca_kern->ca_gart_size)); |
130 | return -ENOMEM; | 130 | return -ENOMEM; |
@@ -341,7 +341,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) | |||
341 | 341 | ||
342 | if (node_upper > 64) { | 342 | if (node_upper > 64) { |
343 | printk(KERN_ERR "%s: coretalk addr 0x%p node id out " | 343 | printk(KERN_ERR "%s: coretalk addr 0x%p node id out " |
344 | "of range\n", __FUNCTION__, (void *)ct_addr); | 344 | "of range\n", __func__, (void *)ct_addr); |
345 | return 0; | 345 | return 0; |
346 | } | 346 | } |
347 | 347 | ||
@@ -349,7 +349,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr) | |||
349 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { | 349 | if (node_upper != (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)) { |
350 | printk(KERN_ERR "%s: coretalk upper node (%u) " | 350 | printk(KERN_ERR "%s: coretalk upper node (%u) " |
351 | "mismatch with ca_agp_dma_addr_extn (%lu)\n", | 351 | "mismatch with ca_agp_dma_addr_extn (%lu)\n", |
352 | __FUNCTION__, | 352 | __func__, |
353 | node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); | 353 | node_upper, (agp_dma_extn >> CA_AGP_DMA_NODE_ID_SHFT)); |
354 | return 0; | 354 | return 0; |
355 | } | 355 | } |
@@ -597,7 +597,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
597 | if (is_shub1() && sn_sal_rev() < 0x0406) { | 597 | if (is_shub1() && sn_sal_rev() < 0x0406) { |
598 | printk | 598 | printk |
599 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " | 599 | (KERN_ERR "%s: SGI prom rev 4.06 or greater required " |
600 | "for tioca support\n", __FUNCTION__); | 600 | "for tioca support\n", __func__); |
601 | return NULL; | 601 | return NULL; |
602 | } | 602 | } |
603 | 603 | ||
@@ -651,7 +651,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
651 | printk(KERN_WARNING | 651 | printk(KERN_WARNING |
652 | "%s: Unable to get irq %d. " | 652 | "%s: Unable to get irq %d. " |
653 | "Error interrupts won't be routed for TIOCA bus %d\n", | 653 | "Error interrupts won't be routed for TIOCA bus %d\n", |
654 | __FUNCTION__, SGI_TIOCA_ERROR, | 654 | __func__, SGI_TIOCA_ERROR, |
655 | (int)tioca_common->ca_common.bs_persist_busnum); | 655 | (int)tioca_common->ca_common.bs_persist_busnum); |
656 | 656 | ||
657 | sn_set_err_irq_affinity(SGI_TIOCA_ERROR); | 657 | sn_set_err_irq_affinity(SGI_TIOCA_ERROR); |
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c index 999f14f986e2..9b3c11373022 100644 --- a/arch/ia64/sn/pci/tioce_provider.c +++ b/arch/ia64/sn/pci/tioce_provider.c | |||
@@ -494,7 +494,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir) | |||
494 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { | 494 | if (&map->ce_dmamap_list == &ce_kern->ce_dmamap_list) { |
495 | printk(KERN_WARNING | 495 | printk(KERN_WARNING |
496 | "%s: %s - no map found for bus_addr 0x%lx\n", | 496 | "%s: %s - no map found for bus_addr 0x%lx\n", |
497 | __FUNCTION__, pci_name(pdev), bus_addr); | 497 | __func__, pci_name(pdev), bus_addr); |
498 | } else if (--map->refcnt == 0) { | 498 | } else if (--map->refcnt == 0) { |
499 | for (i = 0; i < map->ate_count; i++) { | 499 | for (i = 0; i < map->ate_count; i++) { |
500 | map->ate_shadow[i] = 0; | 500 | map->ate_shadow[i] = 0; |
@@ -1030,7 +1030,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont | |||
1030 | "%s: Unable to get irq %d. " | 1030 | "%s: Unable to get irq %d. " |
1031 | "Error interrupts won't be routed for " | 1031 | "Error interrupts won't be routed for " |
1032 | "TIOCE bus %04x:%02x\n", | 1032 | "TIOCE bus %04x:%02x\n", |
1033 | __FUNCTION__, SGI_PCIASIC_ERROR, | 1033 | __func__, SGI_PCIASIC_ERROR, |
1034 | tioce_common->ce_pcibus.bs_persist_segment, | 1034 | tioce_common->ce_pcibus.bs_persist_segment, |
1035 | tioce_common->ce_pcibus.bs_persist_busnum); | 1035 | tioce_common->ce_pcibus.bs_persist_busnum); |
1036 | 1036 | ||
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 9892827b6176..1831833c430e 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -351,6 +351,10 @@ endchoice | |||
351 | 351 | ||
352 | source "fs/Kconfig.binfmt" | 352 | source "fs/Kconfig.binfmt" |
353 | 353 | ||
354 | config FORCE_MAX_ZONEORDER | ||
355 | int | ||
356 | default "9" | ||
357 | |||
354 | config PROCESS_DEBUG | 358 | config PROCESS_DEBUG |
355 | bool "Show crashed user process info" | 359 | bool "Show crashed user process info" |
356 | help | 360 | help |
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 39921f3a9685..62f6b5a606dd 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | # | 1 | # |
2 | # Automatically generated make config: don't edit | 2 | # Automatically generated make config: don't edit |
3 | # Linux kernel version: 2.6.24 | 3 | # Linux kernel version: 2.6.25-rc4 |
4 | # Sat Feb 9 12:13:01 2008 | 4 | # Wed Mar 5 11:22:59 2008 |
5 | # | 5 | # |
6 | CONFIG_MMU=y | 6 | CONFIG_MMU=y |
7 | CONFIG_ZONE_DMA=y | 7 | CONFIG_ZONE_DMA=y |
@@ -43,12 +43,15 @@ CONFIG_CGROUPS=y | |||
43 | # CONFIG_CGROUP_DEBUG is not set | 43 | # CONFIG_CGROUP_DEBUG is not set |
44 | CONFIG_CGROUP_NS=y | 44 | CONFIG_CGROUP_NS=y |
45 | # CONFIG_CPUSETS is not set | 45 | # CONFIG_CPUSETS is not set |
46 | CONFIG_GROUP_SCHED=y | ||
46 | CONFIG_FAIR_GROUP_SCHED=y | 47 | CONFIG_FAIR_GROUP_SCHED=y |
47 | CONFIG_FAIR_USER_SCHED=y | 48 | # CONFIG_RT_GROUP_SCHED is not set |
48 | # CONFIG_FAIR_CGROUP_SCHED is not set | 49 | CONFIG_USER_SCHED=y |
50 | # CONFIG_CGROUP_SCHED is not set | ||
49 | # CONFIG_CGROUP_CPUACCT is not set | 51 | # CONFIG_CGROUP_CPUACCT is not set |
50 | # CONFIG_RESOURCE_COUNTERS is not set | 52 | # CONFIG_RESOURCE_COUNTERS is not set |
51 | CONFIG_SYSFS_DEPRECATED=y | 53 | CONFIG_SYSFS_DEPRECATED=y |
54 | CONFIG_SYSFS_DEPRECATED_V2=y | ||
52 | # CONFIG_RELAY is not set | 55 | # CONFIG_RELAY is not set |
53 | CONFIG_NAMESPACES=y | 56 | CONFIG_NAMESPACES=y |
54 | CONFIG_UTS_NS=y | 57 | CONFIG_UTS_NS=y |
@@ -85,7 +88,9 @@ CONFIG_SLAB=y | |||
85 | # CONFIG_MARKERS is not set | 88 | # CONFIG_MARKERS is not set |
86 | CONFIG_HAVE_OPROFILE=y | 89 | CONFIG_HAVE_OPROFILE=y |
87 | CONFIG_KPROBES=y | 90 | CONFIG_KPROBES=y |
91 | CONFIG_KRETPROBES=y | ||
88 | CONFIG_HAVE_KPROBES=y | 92 | CONFIG_HAVE_KPROBES=y |
93 | CONFIG_HAVE_KRETPROBES=y | ||
89 | CONFIG_PROC_PAGE_MONITOR=y | 94 | CONFIG_PROC_PAGE_MONITOR=y |
90 | CONFIG_SLABINFO=y | 95 | CONFIG_SLABINFO=y |
91 | CONFIG_RT_MUTEXES=y | 96 | CONFIG_RT_MUTEXES=y |
@@ -185,6 +190,7 @@ CONFIG_IPL=y | |||
185 | CONFIG_IPL_VM=y | 190 | CONFIG_IPL_VM=y |
186 | CONFIG_BINFMT_ELF=y | 191 | CONFIG_BINFMT_ELF=y |
187 | CONFIG_BINFMT_MISC=m | 192 | CONFIG_BINFMT_MISC=m |
193 | CONFIG_FORCE_MAX_ZONEORDER=9 | ||
188 | # CONFIG_PROCESS_DEBUG is not set | 194 | # CONFIG_PROCESS_DEBUG is not set |
189 | CONFIG_PFAULT=y | 195 | CONFIG_PFAULT=y |
190 | # CONFIG_SHARED_KERNEL is not set | 196 | # CONFIG_SHARED_KERNEL is not set |
@@ -435,6 +441,7 @@ CONFIG_DASD_EER=y | |||
435 | CONFIG_MISC_DEVICES=y | 441 | CONFIG_MISC_DEVICES=y |
436 | # CONFIG_EEPROM_93CX6 is not set | 442 | # CONFIG_EEPROM_93CX6 is not set |
437 | # CONFIG_ENCLOSURE_SERVICES is not set | 443 | # CONFIG_ENCLOSURE_SERVICES is not set |
444 | # CONFIG_HAVE_IDE is not set | ||
438 | 445 | ||
439 | # | 446 | # |
440 | # SCSI device support | 447 | # SCSI device support |
@@ -593,6 +600,7 @@ CONFIG_S390_VMUR=m | |||
593 | # | 600 | # |
594 | # Sonics Silicon Backplane | 601 | # Sonics Silicon Backplane |
595 | # | 602 | # |
603 | # CONFIG_MEMSTICK is not set | ||
596 | 604 | ||
597 | # | 605 | # |
598 | # File systems | 606 | # File systems |
@@ -750,7 +758,6 @@ CONFIG_DEBUG_BUGVERBOSE=y | |||
750 | # CONFIG_DEBUG_LIST is not set | 758 | # CONFIG_DEBUG_LIST is not set |
751 | # CONFIG_DEBUG_SG is not set | 759 | # CONFIG_DEBUG_SG is not set |
752 | # CONFIG_FRAME_POINTER is not set | 760 | # CONFIG_FRAME_POINTER is not set |
753 | CONFIG_FORCED_INLINING=y | ||
754 | # CONFIG_RCU_TORTURE_TEST is not set | 761 | # CONFIG_RCU_TORTURE_TEST is not set |
755 | # CONFIG_KPROBES_SANITY_TEST is not set | 762 | # CONFIG_KPROBES_SANITY_TEST is not set |
756 | # CONFIG_BACKTRACE_SELF_TEST is not set | 763 | # CONFIG_BACKTRACE_SELF_TEST is not set |
@@ -759,6 +766,7 @@ CONFIG_FORCED_INLINING=y | |||
759 | # CONFIG_LATENCYTOP is not set | 766 | # CONFIG_LATENCYTOP is not set |
760 | CONFIG_SAMPLES=y | 767 | CONFIG_SAMPLES=y |
761 | # CONFIG_SAMPLE_KOBJECT is not set | 768 | # CONFIG_SAMPLE_KOBJECT is not set |
769 | # CONFIG_SAMPLE_KPROBES is not set | ||
762 | # CONFIG_DEBUG_PAGEALLOC is not set | 770 | # CONFIG_DEBUG_PAGEALLOC is not set |
763 | 771 | ||
764 | # | 772 | # |
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index b3b650a93c7c..4d3e38392cb1 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -4,6 +4,11 @@ | |||
4 | 4 | ||
5 | EXTRA_AFLAGS := -traditional | 5 | EXTRA_AFLAGS := -traditional |
6 | 6 | ||
7 | # | ||
8 | # Passing null pointers is ok for smp code, since we access the lowcore here. | ||
9 | # | ||
10 | CFLAGS_smp.o := -Wno-nonnull | ||
11 | |||
7 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ | 12 | obj-y := bitmap.o traps.o time.o process.o base.o early.o \ |
8 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 13 | setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ |
9 | semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o | 14 | semaphore.o s390_ext.o debug.o irq.o ipl.o dis.o diag.o |
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 9f7b73b180f0..01832c440636 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c | |||
@@ -88,13 +88,17 @@ static noinline __init void create_kernel_nss(void) | |||
88 | 88 | ||
89 | __cpcmd(defsys_cmd, NULL, 0, &response); | 89 | __cpcmd(defsys_cmd, NULL, 0, &response); |
90 | 90 | ||
91 | if (response != 0) | 91 | if (response != 0) { |
92 | kernel_nss_name[0] = '\0'; | ||
92 | return; | 93 | return; |
94 | } | ||
93 | 95 | ||
94 | __cpcmd(savesys_cmd, NULL, 0, &response); | 96 | __cpcmd(savesys_cmd, NULL, 0, &response); |
95 | 97 | ||
96 | if (response != strlen(savesys_cmd)) | 98 | if (response != strlen(savesys_cmd)) { |
99 | kernel_nss_name[0] = '\0'; | ||
97 | return; | 100 | return; |
101 | } | ||
98 | 102 | ||
99 | ipl_flags = IPL_NSS_VALID; | 103 | ipl_flags = IPL_NSS_VALID; |
100 | } | 104 | } |
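Editor's note: the early.c hunk above makes create_kernel_nss() clear kernel_nss_name[0] when either CP command fails, so later code that tests the name can tell that no named saved system was actually created. A minimal userspace sketch of that "invalidate the output on failure" pattern follows; the command runner, buffer name and segment name are illustrative stand-ins, not the kernel's own helpers.

#include <stdio.h>

/* Stand-in for __cpcmd(): pretend the CP command was rejected. */
static int run_cp_command(const char *cmd)
{
        (void)cmd;
        return -1;              /* nonzero response means failure */
}

/* Fill 'name' with the saved segment name, or leave it empty on failure
 * so callers can simply test name[0]. */
static void create_segment(char *name, size_t len)
{
        snprintf(name, len, "LNXNSS");

        if (run_cp_command("DEFSYS ...") != 0) {
                name[0] = '\0';
                return;
        }
        if (run_cp_command("SAVESYS ...") != 0) {
                name[0] = '\0';
                return;
        }
}

int main(void)
{
        char name[16];

        create_segment(name, sizeof(name));
        if (name[0])
                printf("using named saved system %s\n", name);
        else
                printf("no named saved system created\n");
        return 0;
}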
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 60acdc266db1..375232c46c7a 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -704,6 +704,7 @@ void reipl_run(struct shutdown_trigger *trigger) | |||
704 | default: | 704 | default: |
705 | break; | 705 | break; |
706 | } | 706 | } |
707 | disabled_wait((unsigned long) __builtin_return_address(0)); | ||
707 | } | 708 | } |
708 | 709 | ||
709 | static void __init reipl_probe(void) | 710 | static void __init reipl_probe(void) |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 1c59ec161cf8..ce203154d8ce 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -152,6 +152,10 @@ static void default_idle(void) | |||
152 | local_mcck_disable(); | 152 | local_mcck_disable(); |
153 | if (test_thread_flag(TIF_MCCK_PENDING)) { | 153 | if (test_thread_flag(TIF_MCCK_PENDING)) { |
154 | local_mcck_enable(); | 154 | local_mcck_enable(); |
155 | /* disable monitor call class 0 */ | ||
156 | __ctl_clear_bit(8, 15); | ||
157 | atomic_notifier_call_chain(&idle_chain, S390_CPU_NOT_IDLE, | ||
158 | hcpu); | ||
155 | local_irq_enable(); | 159 | local_irq_enable(); |
156 | s390_handle_mcck(); | 160 | s390_handle_mcck(); |
157 | return; | 161 | return; |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 818bd09c0260..8f894d380a62 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -629,14 +629,8 @@ static int __cpuinit smp_alloc_lowcore(int cpu) | |||
629 | panic_stack = __get_free_page(GFP_KERNEL); | 629 | panic_stack = __get_free_page(GFP_KERNEL); |
630 | if (!panic_stack || !async_stack) | 630 | if (!panic_stack || !async_stack) |
631 | goto out; | 631 | goto out; |
632 | /* | 632 | memcpy(lowcore, &S390_lowcore, 512); |
633 | * Only need to copy the first 512 bytes from address 0. But since | 633 | memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512); |
634 | * the compiler emits a warning if src == NULL for memcpy use copy_page | ||
635 | * instead. Copies more than needed but this code is not performance | ||
636 | * critical. | ||
637 | */ | ||
638 | copy_page(lowcore, &S390_lowcore); | ||
639 | memset((void *)lowcore + 512, 0, sizeof(*lowcore) - 512); | ||
640 | lowcore->async_stack = async_stack + ASYNC_SIZE; | 634 | lowcore->async_stack = async_stack + ASYNC_SIZE; |
641 | lowcore->panic_stack = panic_stack + PAGE_SIZE; | 635 | lowcore->panic_stack = panic_stack + PAGE_SIZE; |
642 | 636 | ||
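Editor's note: the Makefile hunk earlier adds -Wno-nonnull for smp.o, and the smp.c hunk above then copies the first 512 bytes directly from &S390_lowcore (an object at absolute address 0) with memcpy() instead of the old copy_page() workaround. With glibc headers and a reasonably recent GCC, memcpy() is declared nonnull, so a source pointer the compiler can see is null draws -Wnonnull; the compile-only sketch below reproduces the warning and the flag that silences it. It is a generic illustration, not s390 code, and is not meant to be executed.

/* Compile-only illustration of the warning that -Wno-nonnull suppresses:
 *
 *   gcc -O2 -Wall -c nonnull_demo.c                 should trigger -Wnonnull
 *   gcc -O2 -Wall -Wno-nonnull -c nonnull_demo.c    builds silently
 */
#include <string.h>

struct lowcore {
        char data[512];
};

/* Stand-in for "the structure the hardware keeps at address 0". */
#define LOWCORE_BASE ((const void *)0)

void copy_lowcore_prefix(struct lowcore *dst)
{
        /* memcpy() is declared with a nonnull attribute in the headers, so a
         * provably-null source pointer warns even though the access would be
         * valid on hardware where address 0 is mapped. */
        memcpy(dst, LOWCORE_BASE, sizeof(struct lowcore));
}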
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 76a5dd1b4ce9..cb232c155360 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -209,8 +209,6 @@ static void stop_hz_timer(void) | |||
209 | */ | 209 | */ |
210 | static void start_hz_timer(void) | 210 | static void start_hz_timer(void) |
211 | { | 211 | { |
212 | BUG_ON(!in_interrupt()); | ||
213 | |||
214 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) | 212 | if (!cpu_isset(smp_processor_id(), nohz_cpu_mask)) |
215 | return; | 213 | return; |
216 | account_ticks(get_clock()); | 214 | account_ticks(get_clock()); |
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c index 39f8cb18296c..c2f930d86640 100644 --- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c +++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c | |||
@@ -55,7 +55,6 @@ static int eps_set_state(struct eps_cpu_data *centaur, | |||
55 | { | 55 | { |
56 | struct cpufreq_freqs freqs; | 56 | struct cpufreq_freqs freqs; |
57 | u32 lo, hi; | 57 | u32 lo, hi; |
58 | u8 current_multiplier, current_voltage; | ||
59 | int err = 0; | 58 | int err = 0; |
60 | int i; | 59 | int i; |
61 | 60 | ||
@@ -95,6 +94,10 @@ postchange: | |||
95 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 94 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
96 | freqs.new = centaur->fsb * ((lo >> 8) & 0xff); | 95 | freqs.new = centaur->fsb * ((lo >> 8) & 0xff); |
97 | 96 | ||
97 | #ifdef DEBUG | ||
98 | { | ||
99 | u8 current_multiplier, current_voltage; | ||
100 | |||
98 | /* Print voltage and multiplier */ | 101 | /* Print voltage and multiplier */ |
99 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); | 102 | rdmsr(MSR_IA32_PERF_STATUS, lo, hi); |
100 | current_voltage = lo & 0xff; | 103 | current_voltage = lo & 0xff; |
@@ -103,7 +106,8 @@ postchange: | |||
103 | current_multiplier = (lo >> 8) & 0xff; | 106 | current_multiplier = (lo >> 8) & 0xff; |
104 | printk(KERN_INFO "eps: Current multiplier = %d\n", | 107 | printk(KERN_INFO "eps: Current multiplier = %d\n", |
105 | current_multiplier); | 108 | current_multiplier); |
106 | 109 | } | |
110 | #endif | ||
107 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 111 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
108 | return err; | 112 | return err; |
109 | } | 113 | } |
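Editor's note: the e_powersaver hunk moves current_multiplier and current_voltage into a block that only exists under #ifdef DEBUG, so non-debug builds neither declare the variables nor do the extra status reads just to print them. The same shape in a self-contained sketch; DEBUG, the status-read helper and the decoded fields are placeholders, not the driver's real MSR layout.

#include <stdio.h>

/* Placeholder for an expensive status read (rdmsr in the driver). */
static inline unsigned int read_status(void)
{
        return 0x0812;          /* fabricated: multiplier 8, voltage 0x12 */
}

void report_transition(unsigned int new_freq)
{
#ifdef DEBUG                    /* build with -DDEBUG to enable this block */
        {
                unsigned int lo = read_status();
                unsigned int voltage = lo & 0xff;
                unsigned int multiplier = (lo >> 8) & 0xff;

                printf("debug: voltage=%u multiplier=%u\n",
                       voltage, multiplier);
        }
#endif
        printf("switched to %u kHz\n", new_freq);
}

int main(void)
{
        report_transition(1600000);
        return 0;
}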
diff --git a/crypto/Kconfig b/crypto/Kconfig index 898acc5c1967..69f1be6816f7 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig | |||
@@ -575,6 +575,7 @@ config CRYPTO_TEST | |||
575 | config CRYPTO_AUTHENC | 575 | config CRYPTO_AUTHENC |
576 | tristate "Authenc support" | 576 | tristate "Authenc support" |
577 | select CRYPTO_AEAD | 577 | select CRYPTO_AEAD |
578 | select CRYPTO_BLKCIPHER | ||
578 | select CRYPTO_MANAGER | 579 | select CRYPTO_MANAGER |
579 | select CRYPTO_HASH | 580 | select CRYPTO_HASH |
580 | help | 581 | help |
diff --git a/crypto/Makefile b/crypto/Makefile index 48c758379954..7cf36253a75e 100644 --- a/crypto/Makefile +++ b/crypto/Makefile | |||
@@ -12,9 +12,9 @@ obj-$(CONFIG_CRYPTO_AEAD) += aead.o | |||
12 | 12 | ||
13 | crypto_blkcipher-objs := ablkcipher.o | 13 | crypto_blkcipher-objs := ablkcipher.o |
14 | crypto_blkcipher-objs += blkcipher.o | 14 | crypto_blkcipher-objs += blkcipher.o |
15 | crypto_blkcipher-objs += chainiv.o | ||
16 | crypto_blkcipher-objs += eseqiv.o | ||
15 | obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o | 17 | obj-$(CONFIG_CRYPTO_BLKCIPHER) += crypto_blkcipher.o |
16 | obj-$(CONFIG_CRYPTO_BLKCIPHER) += chainiv.o | ||
17 | obj-$(CONFIG_CRYPTO_BLKCIPHER) += eseqiv.o | ||
18 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o | 18 | obj-$(CONFIG_CRYPTO_SEQIV) += seqiv.o |
19 | 19 | ||
20 | crypto_hash-objs := hash.o | 20 | crypto_hash-objs := hash.o |
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c index 3bcb099b4a85..94140b3756fc 100644 --- a/crypto/ablkcipher.c +++ b/crypto/ablkcipher.c | |||
@@ -341,6 +341,3 @@ err: | |||
341 | return ERR_PTR(err); | 341 | return ERR_PTR(err); |
342 | } | 342 | } |
343 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); | 343 | EXPORT_SYMBOL_GPL(crypto_alloc_ablkcipher); |
344 | |||
345 | MODULE_LICENSE("GPL"); | ||
346 | MODULE_DESCRIPTION("Asynchronous block chaining cipher type"); | ||
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c index 4a7e65c4df4d..185f955fb0d7 100644 --- a/crypto/blkcipher.c +++ b/crypto/blkcipher.c | |||
@@ -696,5 +696,34 @@ void skcipher_geniv_exit(struct crypto_tfm *tfm) | |||
696 | } | 696 | } |
697 | EXPORT_SYMBOL_GPL(skcipher_geniv_exit); | 697 | EXPORT_SYMBOL_GPL(skcipher_geniv_exit); |
698 | 698 | ||
699 | static int __init blkcipher_module_init(void) | ||
700 | { | ||
701 | int err; | ||
702 | |||
703 | err = chainiv_module_init(); | ||
704 | if (err) | ||
705 | goto out; | ||
706 | |||
707 | err = eseqiv_module_init(); | ||
708 | if (err) | ||
709 | goto eseqiv_err; | ||
710 | |||
711 | out: | ||
712 | return err; | ||
713 | |||
714 | eseqiv_err: | ||
715 | chainiv_module_exit(); | ||
716 | goto out; | ||
717 | } | ||
718 | |||
719 | static void __exit blkcipher_module_exit(void) | ||
720 | { | ||
721 | eseqiv_module_exit(); | ||
722 | chainiv_module_exit(); | ||
723 | } | ||
724 | |||
725 | module_init(blkcipher_module_init); | ||
726 | module_exit(blkcipher_module_exit); | ||
727 | |||
699 | MODULE_LICENSE("GPL"); | 728 | MODULE_LICENSE("GPL"); |
700 | MODULE_DESCRIPTION("Generic block chaining cipher type"); | 729 | MODULE_DESCRIPTION("Generic block chaining cipher type"); |
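Editor's note: with chainiv.o and eseqiv.o folded into crypto_blkcipher.o (see the Makefile hunk above), their former module_init/module_exit hooks become exported functions that blkcipher_module_init() calls in order, unwinding in reverse when a later step fails. The standalone sketch below mirrors that init chain; the component names and puts() calls are invented for illustration.

#include <stdio.h>

/* Two sub-components that each register something and can be torn down. */
static int chain_init(void)  { puts("chain registered");  return 0; }
static void chain_exit(void) { puts("chain unregistered"); }

static int eseq_init(void)   { puts("eseq register failed"); return -1; }
static void eseq_exit(void)  { puts("eseq unregistered"); }

/* Composite init: a later failure rolls back earlier successes, in reverse. */
static int composite_init(void)
{
        int err;

        err = chain_init();
        if (err)
                goto out;

        err = eseq_init();
        if (err)
                goto eseq_err;

out:
        return err;

eseq_err:
        chain_exit();
        goto out;
}

static void composite_exit(void)
{
        eseq_exit();
        chain_exit();
}

int main(void)
{
        if (composite_init() == 0)
                composite_exit();
        else
                puts("composite init failed, partial state rolled back");
        return 0;
}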
diff --git a/crypto/chainiv.c b/crypto/chainiv.c index d17fa0454dc3..0a7cac6e9089 100644 --- a/crypto/chainiv.c +++ b/crypto/chainiv.c | |||
@@ -314,18 +314,14 @@ static struct crypto_template chainiv_tmpl = { | |||
314 | .module = THIS_MODULE, | 314 | .module = THIS_MODULE, |
315 | }; | 315 | }; |
316 | 316 | ||
317 | static int __init chainiv_module_init(void) | 317 | int __init chainiv_module_init(void) |
318 | { | 318 | { |
319 | return crypto_register_template(&chainiv_tmpl); | 319 | return crypto_register_template(&chainiv_tmpl); |
320 | } | 320 | } |
321 | EXPORT_SYMBOL_GPL(chainiv_module_init); | ||
321 | 322 | ||
322 | static void __exit chainiv_module_exit(void) | 323 | void __exit chainiv_module_exit(void) |
323 | { | 324 | { |
324 | crypto_unregister_template(&chainiv_tmpl); | 325 | crypto_unregister_template(&chainiv_tmpl); |
325 | } | 326 | } |
326 | 327 | EXPORT_SYMBOL_GPL(chainiv_module_exit); | |
327 | module_init(chainiv_module_init); | ||
328 | module_exit(chainiv_module_exit); | ||
329 | |||
330 | MODULE_LICENSE("GPL"); | ||
331 | MODULE_DESCRIPTION("Chain IV Generator"); | ||
diff --git a/crypto/digest.c b/crypto/digest.c index 6fd43bddd545..b526cc348b79 100644 --- a/crypto/digest.c +++ b/crypto/digest.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/scatterlist.h> | 22 | #include <linux/scatterlist.h> |
23 | 23 | ||
24 | #include "internal.h" | ||
25 | |||
24 | static int init(struct hash_desc *desc) | 26 | static int init(struct hash_desc *desc) |
25 | { | 27 | { |
26 | struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); | 28 | struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm); |
diff --git a/crypto/eseqiv.c b/crypto/eseqiv.c index eb90d27ae118..6f2cd063b6fe 100644 --- a/crypto/eseqiv.c +++ b/crypto/eseqiv.c | |||
@@ -247,18 +247,14 @@ static struct crypto_template eseqiv_tmpl = { | |||
247 | .module = THIS_MODULE, | 247 | .module = THIS_MODULE, |
248 | }; | 248 | }; |
249 | 249 | ||
250 | static int __init eseqiv_module_init(void) | 250 | int __init eseqiv_module_init(void) |
251 | { | 251 | { |
252 | return crypto_register_template(&eseqiv_tmpl); | 252 | return crypto_register_template(&eseqiv_tmpl); |
253 | } | 253 | } |
254 | EXPORT_SYMBOL_GPL(eseqiv_module_init); | ||
254 | 255 | ||
255 | static void __exit eseqiv_module_exit(void) | 256 | void __exit eseqiv_module_exit(void) |
256 | { | 257 | { |
257 | crypto_unregister_template(&eseqiv_tmpl); | 258 | crypto_unregister_template(&eseqiv_tmpl); |
258 | } | 259 | } |
259 | 260 | EXPORT_SYMBOL_GPL(eseqiv_module_exit); | |
260 | module_init(eseqiv_module_init); | ||
261 | module_exit(eseqiv_module_exit); | ||
262 | |||
263 | MODULE_LICENSE("GPL"); | ||
264 | MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator"); | ||
diff --git a/crypto/xcbc.c b/crypto/xcbc.c index 86727403e5ab..2feb0f239c38 100644 --- a/crypto/xcbc.c +++ b/crypto/xcbc.c | |||
@@ -124,6 +124,11 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, | |||
124 | unsigned int offset = sg[i].offset; | 124 | unsigned int offset = sg[i].offset; |
125 | unsigned int slen = sg[i].length; | 125 | unsigned int slen = sg[i].length; |
126 | 126 | ||
127 | if (unlikely(slen > nbytes)) | ||
128 | slen = nbytes; | ||
129 | |||
130 | nbytes -= slen; | ||
131 | |||
127 | while (slen > 0) { | 132 | while (slen > 0) { |
128 | unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); | 133 | unsigned int len = min(slen, ((unsigned int)(PAGE_SIZE)) - offset); |
129 | char *p = crypto_kmap(pg, 0) + offset; | 134 | char *p = crypto_kmap(pg, 0) + offset; |
@@ -177,7 +182,6 @@ static int crypto_xcbc_digest_update2(struct hash_desc *pdesc, | |||
177 | offset = 0; | 182 | offset = 0; |
178 | pg++; | 183 | pg++; |
179 | } | 184 | } |
180 | nbytes-=sg[i].length; | ||
181 | i++; | 185 | i++; |
182 | } while (nbytes>0); | 186 | } while (nbytes>0); |
183 | 187 | ||
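Editor's note: the xcbc hunk clamps each scatterlist entry to the bytes actually requested (slen = nbytes when the entry is longer) and deducts the clamped length up front, instead of subtracting sg[i].length at the end of the loop, which let the walk run past the requested count whenever the scatterlist described more data than nbytes. The arithmetic in isolation, with plain arrays standing in for scatterlist entries:

#include <stdio.h>

/* Walk 'nbytes' of data spread across segments of the given lengths,
 * never consuming more than requested even if the segments hold more. */
static unsigned int walk(const unsigned int *seg_len, int nsegs,
                         unsigned int nbytes)
{
        unsigned int processed = 0;
        int i = 0;

        do {
                unsigned int slen = seg_len[i];

                if (slen > nbytes)      /* clamp to what was asked for */
                        slen = nbytes;
                nbytes -= slen;         /* account for it up front */

                processed += slen;      /* "process" slen bytes of segment i */
                i++;
        } while (nbytes > 0 && i < nsegs);

        return processed;
}

int main(void)
{
        unsigned int segs[] = { 64, 64, 64 };   /* 192 bytes available */

        /* Request only 100 bytes: 64 from segment 0, then 36 from segment 1. */
        printf("processed %u bytes\n", walk(segs, 3, 100));
        return 0;
}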
diff --git a/crypto/xts.c b/crypto/xts.c index 8eb08bfaf7c0..d87b0f3102c3 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -77,16 +77,16 @@ static int setkey(struct crypto_tfm *parent, const u8 *key, | |||
77 | } | 77 | } |
78 | 78 | ||
79 | struct sinfo { | 79 | struct sinfo { |
80 | be128 t; | 80 | be128 *t; |
81 | struct crypto_tfm *tfm; | 81 | struct crypto_tfm *tfm; |
82 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); | 82 | void (*fn)(struct crypto_tfm *, u8 *, const u8 *); |
83 | }; | 83 | }; |
84 | 84 | ||
85 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) | 85 | static inline void xts_round(struct sinfo *s, void *dst, const void *src) |
86 | { | 86 | { |
87 | be128_xor(dst, &s->t, src); /* PP <- T xor P */ | 87 | be128_xor(dst, s->t, src); /* PP <- T xor P */ |
88 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ | 88 | s->fn(s->tfm, dst, dst); /* CC <- E(Key1,PP) */ |
89 | be128_xor(dst, dst, &s->t); /* C <- T xor CC */ | 89 | be128_xor(dst, dst, s->t); /* C <- T xor CC */ |
90 | } | 90 | } |
91 | 91 | ||
92 | static int crypt(struct blkcipher_desc *d, | 92 | static int crypt(struct blkcipher_desc *d, |
@@ -101,7 +101,6 @@ static int crypt(struct blkcipher_desc *d, | |||
101 | .tfm = crypto_cipher_tfm(ctx->child), | 101 | .tfm = crypto_cipher_tfm(ctx->child), |
102 | .fn = fn | 102 | .fn = fn |
103 | }; | 103 | }; |
104 | be128 *iv; | ||
105 | u8 *wsrc; | 104 | u8 *wsrc; |
106 | u8 *wdst; | 105 | u8 *wdst; |
107 | 106 | ||
@@ -109,20 +108,20 @@ static int crypt(struct blkcipher_desc *d, | |||
109 | if (!w->nbytes) | 108 | if (!w->nbytes) |
110 | return err; | 109 | return err; |
111 | 110 | ||
111 | s.t = (be128 *)w->iv; | ||
112 | avail = w->nbytes; | 112 | avail = w->nbytes; |
113 | 113 | ||
114 | wsrc = w->src.virt.addr; | 114 | wsrc = w->src.virt.addr; |
115 | wdst = w->dst.virt.addr; | 115 | wdst = w->dst.virt.addr; |
116 | 116 | ||
117 | /* calculate first value of T */ | 117 | /* calculate first value of T */ |
118 | iv = (be128 *)w->iv; | 118 | tw(crypto_cipher_tfm(ctx->tweak), w->iv, w->iv); |
119 | tw(crypto_cipher_tfm(ctx->tweak), (void *)&s.t, w->iv); | ||
120 | 119 | ||
121 | goto first; | 120 | goto first; |
122 | 121 | ||
123 | for (;;) { | 122 | for (;;) { |
124 | do { | 123 | do { |
125 | gf128mul_x_ble(&s.t, &s.t); | 124 | gf128mul_x_ble(s.t, s.t); |
126 | 125 | ||
127 | first: | 126 | first: |
128 | xts_round(&s, wdst, wsrc); | 127 | xts_round(&s, wdst, wsrc); |
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 1db93b619074..8a49835bd0f8 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -186,6 +186,7 @@ enum { | |||
186 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ | 186 | AHCI_HFLAG_NO_MSI = (1 << 5), /* no PCI MSI */ |
187 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ | 187 | AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */ |
188 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ | 188 | AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */ |
189 | AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */ | ||
189 | 190 | ||
190 | /* ap->flags bits */ | 191 | /* ap->flags bits */ |
191 | 192 | ||
@@ -255,6 +256,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap); | |||
255 | static void ahci_p5wdh_error_handler(struct ata_port *ap); | 256 | static void ahci_p5wdh_error_handler(struct ata_port *ap); |
256 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); | 257 | static void ahci_post_internal_cmd(struct ata_queued_cmd *qc); |
257 | static int ahci_port_resume(struct ata_port *ap); | 258 | static int ahci_port_resume(struct ata_port *ap); |
259 | static void ahci_dev_config(struct ata_device *dev); | ||
258 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); | 260 | static unsigned int ahci_fill_sg(struct ata_queued_cmd *qc, void *cmd_tbl); |
259 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, | 261 | static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, |
260 | u32 opts); | 262 | u32 opts); |
@@ -294,6 +296,8 @@ static const struct ata_port_operations ahci_ops = { | |||
294 | .check_altstatus = ahci_check_status, | 296 | .check_altstatus = ahci_check_status, |
295 | .dev_select = ata_noop_dev_select, | 297 | .dev_select = ata_noop_dev_select, |
296 | 298 | ||
299 | .dev_config = ahci_dev_config, | ||
300 | |||
297 | .tf_read = ahci_tf_read, | 301 | .tf_read = ahci_tf_read, |
298 | 302 | ||
299 | .qc_defer = sata_pmp_qc_defer_cmd_switch, | 303 | .qc_defer = sata_pmp_qc_defer_cmd_switch, |
@@ -425,7 +429,7 @@ static const struct ata_port_info ahci_port_info[] = { | |||
425 | /* board_ahci_sb600 */ | 429 | /* board_ahci_sb600 */ |
426 | { | 430 | { |
427 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | | 431 | AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL | |
428 | AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_PMP), | 432 | AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP), |
429 | .flags = AHCI_FLAG_COMMON, | 433 | .flags = AHCI_FLAG_COMMON, |
430 | .link_flags = AHCI_LFLAG_COMMON, | 434 | .link_flags = AHCI_LFLAG_COMMON, |
431 | .pio_mask = 0x1f, /* pio0-4 */ | 435 | .pio_mask = 0x1f, /* pio0-4 */ |
@@ -1176,6 +1180,14 @@ static void ahci_init_controller(struct ata_host *host) | |||
1176 | VPRINTK("HOST_CTL 0x%x\n", tmp); | 1180 | VPRINTK("HOST_CTL 0x%x\n", tmp); |
1177 | } | 1181 | } |
1178 | 1182 | ||
1183 | static void ahci_dev_config(struct ata_device *dev) | ||
1184 | { | ||
1185 | struct ahci_host_priv *hpriv = dev->link->ap->host->private_data; | ||
1186 | |||
1187 | if (hpriv->flags & AHCI_HFLAG_SECT255) | ||
1188 | dev->max_sectors = 255; | ||
1189 | } | ||
1190 | |||
1179 | static unsigned int ahci_dev_classify(struct ata_port *ap) | 1191 | static unsigned int ahci_dev_classify(struct ata_port *ap) |
1180 | { | 1192 | { |
1181 | void __iomem *port_mmio = ahci_port_base(ap); | 1193 | void __iomem *port_mmio = ahci_port_base(ap); |
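Editor's note: the ahci hunk replaces AHCI_HFLAG_32BIT_ONLY with a new AHCI_HFLAG_SECT255 host flag for board_ahci_sb600 and wires a ->dev_config hook that caps dev->max_sectors at 255 when the flag is set. A reduced sketch of that flag-driven per-device quirk follows; the struct layout and names are simplified placeholders, not libata's types.

#include <stdio.h>

#define HFLAG_SECT255 (1u << 8)         /* host cannot do 256-sector transfers */

struct host   { unsigned int flags; };
struct device { const struct host *host; unsigned int max_sectors; };

/* Called for every attached device; host-wide quirks are applied here. */
static void dev_config(struct device *dev)
{
        if (dev->host->flags & HFLAG_SECT255)
                dev->max_sectors = 255;
}

int main(void)
{
        struct host sb600 = { .flags = HFLAG_SECT255 };
        struct device disk = { .host = &sb600, .max_sectors = 256 };

        dev_config(&disk);
        printf("max_sectors = %u\n", disk.max_sectors);  /* 255 */
        return 0;
}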
diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c index 0713872cf65c..a742efa0da2b 100644 --- a/drivers/ata/pata_hpt366.c +++ b/drivers/ata/pata_hpt366.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <linux/libata.h> | 27 | #include <linux/libata.h> |
28 | 28 | ||
29 | #define DRV_NAME "pata_hpt366" | 29 | #define DRV_NAME "pata_hpt366" |
30 | #define DRV_VERSION "0.6.1" | 30 | #define DRV_VERSION "0.6.2" |
31 | 31 | ||
32 | struct hpt_clock { | 32 | struct hpt_clock { |
33 | u8 xfer_speed; | 33 | u8 xfer_speed; |
@@ -180,9 +180,9 @@ static unsigned long hpt366_filter(struct ata_device *adev, unsigned long mask) | |||
180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 180 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
181 | mask &= ~ATA_MASK_UDMA; | 181 | mask &= ~ATA_MASK_UDMA; |
182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) | 182 | if (hpt_dma_blacklisted(adev, "UDMA3", bad_ata66_3)) |
183 | mask &= ~(0x07 << ATA_SHIFT_UDMA); | 183 | mask &= ~(0xF8 << ATA_SHIFT_UDMA); |
184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) | 184 | if (hpt_dma_blacklisted(adev, "UDMA4", bad_ata66_4)) |
185 | mask &= ~(0x0F << ATA_SHIFT_UDMA); | 185 | mask &= ~(0xF0 << ATA_SHIFT_UDMA); |
186 | } | 186 | } |
187 | return ata_pci_default_filter(adev, mask); | 187 | return ata_pci_default_filter(adev, mask); |
188 | } | 188 | } |
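Editor's note: the filter fix above inverts the mask. A drive blacklisted for "UDMA3 and up" should lose the high modes, so the code must clear bits 3..7 of the UDMA field (0xF8), not bits 0..2 (0x07); likewise "UDMA4 and up" clears 0xF0, and the hpt37x and serverworks hunks that follow clear 0xE0 instead of 0x1F for "UDMA5 and up". The bit arithmetic, shown standalone; the value used for ATA_SHIFT_UDMA here is only a stand-in, the exact shift does not matter for the illustration.

#include <stdio.h>

#define ATA_SHIFT_UDMA 8        /* placeholder shift for the UDMA bit field */

static void show(const char *what, unsigned long mask)
{
        printf("%-30s UDMA bits = 0x%02lx\n", what,
               (mask >> ATA_SHIFT_UDMA) & 0xff);
}

int main(void)
{
        /* Start with every UDMA mode 0..7 allowed. */
        unsigned long mask = 0xfful << ATA_SHIFT_UDMA;

        show("all modes", mask);

        /* Old code for a "no UDMA3 and up" drive: clears modes 0-2 and
         * leaves exactly the modes the drive cannot handle. */
        show("old: mask &= ~(0x07 << s)", mask & ~(0x07ul << ATA_SHIFT_UDMA));

        /* Fixed code: clear modes 3-7, keep UDMA0-2. */
        show("new: mask &= ~(0xF8 << s)", mask & ~(0xF8ul << ATA_SHIFT_UDMA));
        return 0;
}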
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c index 68eb34929cec..9a10878b2ad8 100644 --- a/drivers/ata/pata_hpt37x.c +++ b/drivers/ata/pata_hpt37x.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/libata.h> | 24 | #include <linux/libata.h> |
25 | 25 | ||
26 | #define DRV_NAME "pata_hpt37x" | 26 | #define DRV_NAME "pata_hpt37x" |
27 | #define DRV_VERSION "0.6.9" | 27 | #define DRV_VERSION "0.6.11" |
28 | 28 | ||
29 | struct hpt_clock { | 29 | struct hpt_clock { |
30 | u8 xfer_speed; | 30 | u8 xfer_speed; |
@@ -281,7 +281,7 @@ static unsigned long hpt370_filter(struct ata_device *adev, unsigned long mask) | |||
281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) | 281 | if (hpt_dma_blacklisted(adev, "UDMA", bad_ata33)) |
282 | mask &= ~ATA_MASK_UDMA; | 282 | mask &= ~ATA_MASK_UDMA; |
283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 283 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
284 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 284 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
285 | } | 285 | } |
286 | return ata_pci_default_filter(adev, mask); | 286 | return ata_pci_default_filter(adev, mask); |
287 | } | 287 | } |
@@ -297,7 +297,7 @@ static unsigned long hpt370a_filter(struct ata_device *adev, unsigned long mask) | |||
297 | { | 297 | { |
298 | if (adev->class == ATA_DEV_ATA) { | 298 | if (adev->class == ATA_DEV_ATA) { |
299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) | 299 | if (hpt_dma_blacklisted(adev, "UDMA100", bad_ata100_5)) |
300 | mask &= ~ (0x1F << ATA_SHIFT_UDMA); | 300 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
301 | } | 301 | } |
302 | return ata_pci_default_filter(adev, mask); | 302 | return ata_pci_default_filter(adev, mask); |
303 | } | 303 | } |
diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c index 9c523fbf529e..a589c0fa0dbb 100644 --- a/drivers/ata/pata_serverworks.c +++ b/drivers/ata/pata_serverworks.c | |||
@@ -226,7 +226,7 @@ static unsigned long serverworks_csb_filter(struct ata_device *adev, unsigned lo | |||
226 | 226 | ||
227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { | 227 | for (i = 0; (p = csb_bad_ata100[i]) != NULL; i++) { |
228 | if (!strcmp(p, model_num)) | 228 | if (!strcmp(p, model_num)) |
229 | mask &= ~(0x1F << ATA_SHIFT_UDMA); | 229 | mask &= ~(0xE0 << ATA_SHIFT_UDMA); |
230 | } | 230 | } |
231 | return ata_pci_default_filter(adev, mask); | 231 | return ata_pci_default_filter(adev, mask); |
232 | } | 232 | } |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 89a29cd93783..35a26a3e5f68 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -671,13 +671,13 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
671 | { | 671 | { |
672 | struct cpufreq_policy * policy = to_policy(kobj); | 672 | struct cpufreq_policy * policy = to_policy(kobj); |
673 | struct freq_attr * fattr = to_attr(attr); | 673 | struct freq_attr * fattr = to_attr(attr); |
674 | ssize_t ret; | 674 | ssize_t ret = -EINVAL; |
675 | policy = cpufreq_cpu_get(policy->cpu); | 675 | policy = cpufreq_cpu_get(policy->cpu); |
676 | if (!policy) | 676 | if (!policy) |
677 | return -EINVAL; | 677 | goto no_policy; |
678 | 678 | ||
679 | if (lock_policy_rwsem_read(policy->cpu) < 0) | 679 | if (lock_policy_rwsem_read(policy->cpu) < 0) |
680 | return -EINVAL; | 680 | goto fail; |
681 | 681 | ||
682 | if (fattr->show) | 682 | if (fattr->show) |
683 | ret = fattr->show(policy, buf); | 683 | ret = fattr->show(policy, buf); |
@@ -685,8 +685,9 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf) | |||
685 | ret = -EIO; | 685 | ret = -EIO; |
686 | 686 | ||
687 | unlock_policy_rwsem_read(policy->cpu); | 687 | unlock_policy_rwsem_read(policy->cpu); |
688 | 688 | fail: | |
689 | cpufreq_cpu_put(policy); | 689 | cpufreq_cpu_put(policy); |
690 | no_policy: | ||
690 | return ret; | 691 | return ret; |
691 | } | 692 | } |
692 | 693 | ||
@@ -695,13 +696,13 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
695 | { | 696 | { |
696 | struct cpufreq_policy * policy = to_policy(kobj); | 697 | struct cpufreq_policy * policy = to_policy(kobj); |
697 | struct freq_attr * fattr = to_attr(attr); | 698 | struct freq_attr * fattr = to_attr(attr); |
698 | ssize_t ret; | 699 | ssize_t ret = -EINVAL; |
699 | policy = cpufreq_cpu_get(policy->cpu); | 700 | policy = cpufreq_cpu_get(policy->cpu); |
700 | if (!policy) | 701 | if (!policy) |
701 | return -EINVAL; | 702 | goto no_policy; |
702 | 703 | ||
703 | if (lock_policy_rwsem_write(policy->cpu) < 0) | 704 | if (lock_policy_rwsem_write(policy->cpu) < 0) |
704 | return -EINVAL; | 705 | goto fail; |
705 | 706 | ||
706 | if (fattr->store) | 707 | if (fattr->store) |
707 | ret = fattr->store(policy, buf, count); | 708 | ret = fattr->store(policy, buf, count); |
@@ -709,8 +710,9 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr, | |||
709 | ret = -EIO; | 710 | ret = -EIO; |
710 | 711 | ||
711 | unlock_policy_rwsem_write(policy->cpu); | 712 | unlock_policy_rwsem_write(policy->cpu); |
712 | 713 | fail: | |
713 | cpufreq_cpu_put(policy); | 714 | cpufreq_cpu_put(policy); |
715 | no_policy: | ||
714 | return ret; | 716 | return ret; |
715 | } | 717 | } |
716 | 718 | ||
@@ -1775,7 +1777,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, | |||
1775 | return NOTIFY_OK; | 1777 | return NOTIFY_OK; |
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static struct notifier_block __cpuinitdata cpufreq_cpu_notifier = | 1780 | static struct notifier_block __refdata cpufreq_cpu_notifier = |
1779 | { | 1781 | { |
1780 | .notifier_call = cpufreq_cpu_callback, | 1782 | .notifier_call = cpufreq_cpu_callback, |
1781 | }; | 1783 | }; |
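Editor's note: the show()/store() hunks above convert early returns into goto labels so that a failed lock still drops the policy reference taken by cpufreq_cpu_get(), and ret starts at -EINVAL so every path shares one exit. The same control flow with the cpufreq types stripped out; get/put/lock below are toy stand-ins that simulate a lock failure.

#include <stdio.h>

static int refcount;

static int get_policy(void)     { refcount++; return 1; }   /* 1 = success */
static void put_policy(void)    { refcount--; }
static int lock_policy(void)    { return -1; }              /* simulate failure */
static void unlock_policy(void) { }

static int show(void)
{
        int ret = -22;                  /* -EINVAL until proven otherwise */

        if (!get_policy())
                goto no_policy;

        if (lock_policy() < 0)
                goto fail;              /* must still drop the reference */

        ret = 0;                        /* real work would go here */
        unlock_policy();
fail:
        put_policy();
no_policy:
        return ret;
}

int main(void)
{
        int ret = show();

        /* Expected: ret=-22 refcount=0, i.e. the error path leaked nothing. */
        printf("ret=%d refcount=%d\n", ret, refcount);
        return 0;
}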
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1b8312b02006..070421a5480e 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -323,7 +323,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, | |||
323 | return NOTIFY_OK; | 323 | return NOTIFY_OK; |
324 | } | 324 | } |
325 | 325 | ||
326 | static struct notifier_block cpufreq_stat_cpu_notifier __cpuinitdata = | 326 | static struct notifier_block cpufreq_stat_cpu_notifier __refdata = |
327 | { | 327 | { |
328 | .notifier_call = cpufreq_stat_cpu_callback, | 328 | .notifier_call = cpufreq_stat_cpu_callback, |
329 | }; | 329 | }; |
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index f77b329f6923..78734e25edd5 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c | |||
@@ -1701,6 +1701,11 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info, | |||
1701 | if (error) | 1701 | if (error) |
1702 | goto out_free_consistent; | 1702 | goto out_free_consistent; |
1703 | 1703 | ||
1704 | if (!buffer->NumPhys) { | ||
1705 | error = -ENODEV; | ||
1706 | goto out_free_consistent; | ||
1707 | } | ||
1708 | |||
1704 | /* save config data */ | 1709 | /* save config data */ |
1705 | port_info->num_phys = buffer->NumPhys; | 1710 | port_info->num_phys = buffer->NumPhys; |
1706 | port_info->phy_info = kcalloc(port_info->num_phys, | 1711 | port_info->phy_info = kcalloc(port_info->num_phys, |
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c index 6ac81e35355c..275960462970 100644 --- a/drivers/mtd/ubi/build.c +++ b/drivers/mtd/ubi/build.c | |||
@@ -1000,8 +1000,8 @@ static int __init ubi_init(void) | |||
1000 | mutex_unlock(&ubi_devices_mutex); | 1000 | mutex_unlock(&ubi_devices_mutex); |
1001 | if (err < 0) { | 1001 | if (err < 0) { |
1002 | put_mtd_device(mtd); | 1002 | put_mtd_device(mtd); |
1003 | printk(KERN_ERR "UBI error: cannot attach %s\n", | 1003 | printk(KERN_ERR "UBI error: cannot attach mtd%d\n", |
1004 | p->name); | 1004 | mtd->index); |
1005 | goto out_detach; | 1005 | goto out_detach; |
1006 | } | 1006 | } |
1007 | } | 1007 | } |
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index 457710615261..a548c1d28fa8 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
@@ -217,11 +217,11 @@ struct ubi_volume { | |||
217 | void *upd_buf; | 217 | void *upd_buf; |
218 | 218 | ||
219 | int *eba_tbl; | 219 | int *eba_tbl; |
220 | int checked:1; | 220 | unsigned int checked:1; |
221 | int corrupted:1; | 221 | unsigned int corrupted:1; |
222 | int upd_marker:1; | 222 | unsigned int upd_marker:1; |
223 | int updating:1; | 223 | unsigned int updating:1; |
224 | int changing_leb:1; | 224 | unsigned int changing_leb:1; |
225 | 225 | ||
226 | #ifdef CONFIG_MTD_UBI_GLUEBI | 226 | #ifdef CONFIG_MTD_UBI_GLUEBI |
227 | /* | 227 | /* |
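Editor's note: the ubi.h hunk switches the 1-bit flags from int to unsigned int. With GCC's defaults (what the kernel builds with) a plain int bitfield is signed, so a 1-bit field can only hold 0 and -1: assigning 1 reads back as -1 and breaks comparisons like checked == 1. A two-struct check:

#include <stdio.h>

struct flags_signed   { int checked:1; };          /* signed with GCC's defaults */
struct flags_unsigned { unsigned int checked:1; };

int main(void)
{
        struct flags_signed   s;
        struct flags_unsigned u;
        int one = 1;

        s.checked = one;        /* the single bit is the sign bit: reads back -1 */
        u.checked = one;

        printf("int:1          -> %d, (checked == 1) is %d\n",
               s.checked, s.checked == 1);
        printf("unsigned int:1 -> %d, (checked == 1) is %d\n",
               u.checked, u.checked == 1);
        return 0;
}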
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index a3ca2257e601..5be58d85c639 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
@@ -376,7 +376,9 @@ out_sysfs: | |||
376 | get_device(&vol->dev); | 376 | get_device(&vol->dev); |
377 | volume_sysfs_close(vol); | 377 | volume_sysfs_close(vol); |
378 | out_gluebi: | 378 | out_gluebi: |
379 | ubi_destroy_gluebi(vol); | 379 | if (ubi_destroy_gluebi(vol)) |
380 | dbg_err("cannot destroy gluebi for volume %d:%d", | ||
381 | ubi->ubi_num, vol_id); | ||
380 | out_cdev: | 382 | out_cdev: |
381 | cdev_del(&vol->cdev); | 383 | cdev_del(&vol->cdev); |
382 | out_mapping: | 384 | out_mapping: |
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c index 56fc3fbce838..af36b12be278 100644 --- a/drivers/mtd/ubi/vtbl.c +++ b/drivers/mtd/ubi/vtbl.c | |||
@@ -519,6 +519,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si, | |||
519 | if (ubi->autoresize_vol_id != -1) { | 519 | if (ubi->autoresize_vol_id != -1) { |
520 | ubi_err("more then one auto-resize volume (%d " | 520 | ubi_err("more then one auto-resize volume (%d " |
521 | "and %d)", ubi->autoresize_vol_id, i); | 521 | "and %d)", ubi->autoresize_vol_id, i); |
522 | kfree(vol); | ||
522 | return -EINVAL; | 523 | return -EINVAL; |
523 | } | 524 | } |
524 | 525 | ||
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 60d338cd8009..62db3c3fe4dc 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -366,8 +366,8 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size) | |||
366 | ** ggg sacrifices another 710 to the computer gods. | 366 | ** ggg sacrifices another 710 to the computer gods. |
367 | */ | 367 | */ |
368 | 368 | ||
369 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | 369 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, |
370 | boundary_size >>= IOVP_SHIFT; | 370 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; |
371 | 371 | ||
372 | if (pages_needed <= 8) { | 372 | if (pages_needed <= 8) { |
373 | /* | 373 | /* |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index e834127a8505..bdbe780e21c5 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -341,8 +341,8 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev, | |||
341 | unsigned long shift; | 341 | unsigned long shift; |
342 | int ret; | 342 | int ret; |
343 | 343 | ||
344 | boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, 1 << IOVP_SHIFT); | 344 | boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1, |
345 | boundary_size >>= IOVP_SHIFT; | 345 | 1ULL << IOVP_SHIFT) >> IOVP_SHIFT; |
346 | 346 | ||
347 | #if defined(ZX1_SUPPORT) | 347 | #if defined(ZX1_SUPPORT) |
348 | BUG_ON(ioc->ibase & ~IOVP_MASK); | 348 | BUG_ON(ioc->ibase & ~IOVP_MASK); |
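Editor's note: dma_get_seg_boundary() returns a mask such as 0xffffffff; on configurations where that value sits in a 32-bit type, adding 1 before widening wraps to 0 and the computed boundary size collapses to 0, which is presumably why both parisc hunks above widen to unsigned long long before the +1 and shift with 1ULL. The wrap reproduced with fixed-width types; the rounding macro is written out here as an equivalent of the kernel's ALIGN(), and IOVP_SHIFT is assumed to be the usual 4k page shift.

#include <stdio.h>
#include <inttypes.h>
#include <stdint.h>

/* Round x up to a multiple of a (a is a power of two). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((__typeof__(x))(a) - 1))

#define IOVP_SHIFT 12           /* 4k I/O pages */

int main(void)
{
        uint32_t seg_boundary = 0xffffffffu;    /* "no boundary" mask */

        /* 32-bit arithmetic: +1 wraps to 0, so the result is 0 pages. */
        uint32_t narrow = ALIGN_UP(seg_boundary + 1, 1u << IOVP_SHIFT)
                          >> IOVP_SHIFT;

        /* Widen first, as the patch does: 0xffffffff + 1 = 0x100000000. */
        uint64_t wide = ALIGN_UP((uint64_t)seg_boundary + 1, 1ull << IOVP_SHIFT)
                        >> IOVP_SHIFT;

        printf("32-bit: boundary pages = %" PRIu32 "\n", narrow);   /* 0       */
        printf("64-bit: boundary pages = %" PRIu64 "\n", wide);     /* 1048576 */
        return 0;
}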
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c index f69714a0e9e7..b19db20a0bef 100644 --- a/drivers/s390/block/dasd_3990_erp.c +++ b/drivers/s390/block/dasd_3990_erp.c | |||
@@ -2310,10 +2310,8 @@ static int | |||
2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) | 2310 | dasd_3990_erp_error_match(struct dasd_ccw_req *cqr1, struct dasd_ccw_req *cqr2) |
2311 | { | 2311 | { |
2312 | 2312 | ||
2313 | /* check failed CCW */ | 2313 | if (cqr1->startdev != cqr2->startdev) |
2314 | if (cqr1->irb.scsw.cpa != cqr2->irb.scsw.cpa) { | 2314 | return 0; |
2315 | // return 0; /* CCW doesn't match */ | ||
2316 | } | ||
2317 | 2315 | ||
2318 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) | 2316 | if (cqr1->irb.esw.esw0.erw.cons != cqr2->irb.esw.esw0.erw.cons) |
2319 | return 0; | 2317 | return 0; |
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 28a86f070048..556063e8f7a9 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -62,8 +62,10 @@ dasd_devices_show(struct seq_file *m, void *v) | |||
62 | return 0; | 62 | return 0; |
63 | if (device->block) | 63 | if (device->block) |
64 | block = device->block; | 64 | block = device->block; |
65 | else | 65 | else { |
66 | dasd_put_device(device); | ||
66 | return 0; | 67 | return 0; |
68 | } | ||
67 | /* Print device number. */ | 69 | /* Print device number. */ |
68 | seq_printf(m, "%s", device->cdev->dev.bus_id); | 70 | seq_printf(m, "%s", device->cdev->dev.bus_id); |
69 | /* Print discipline string. */ | 71 | /* Print discipline string. */ |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 92f527201792..f7b258dfd52c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -367,7 +367,7 @@ sclp_vt220_timeout(unsigned long data) | |||
367 | sclp_vt220_emit_current(); | 367 | sclp_vt220_emit_current(); |
368 | } | 368 | } |
369 | 369 | ||
370 | #define BUFFER_MAX_DELAY HZ/2 | 370 | #define BUFFER_MAX_DELAY HZ/20 |
371 | 371 | ||
372 | /* | 372 | /* |
373 | * Internal implementation of the write function. Write COUNT bytes of data | 373 | * Internal implementation of the write function. Write COUNT bytes of data |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index d0c6fd3b1c19..7b0b81901297 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -490,10 +490,12 @@ static int ap_device_probe(struct device *dev) | |||
490 | int rc; | 490 | int rc; |
491 | 491 | ||
492 | ap_dev->drv = ap_drv; | 492 | ap_dev->drv = ap_drv; |
493 | spin_lock_bh(&ap_device_lock); | ||
494 | list_add(&ap_dev->list, &ap_device_list); | ||
495 | spin_unlock_bh(&ap_device_lock); | ||
496 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 493 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
494 | if (!rc) { | ||
495 | spin_lock_bh(&ap_device_lock); | ||
496 | list_add(&ap_dev->list, &ap_device_list); | ||
497 | spin_unlock_bh(&ap_device_lock); | ||
498 | } | ||
497 | return rc; | 499 | return rc; |
498 | } | 500 | } |
499 | 501 | ||
@@ -532,11 +534,11 @@ static int ap_device_remove(struct device *dev) | |||
532 | 534 | ||
533 | ap_flush_queue(ap_dev); | 535 | ap_flush_queue(ap_dev); |
534 | del_timer_sync(&ap_dev->timeout); | 536 | del_timer_sync(&ap_dev->timeout); |
535 | if (ap_drv->remove) | ||
536 | ap_drv->remove(ap_dev); | ||
537 | spin_lock_bh(&ap_device_lock); | 537 | spin_lock_bh(&ap_device_lock); |
538 | list_del_init(&ap_dev->list); | 538 | list_del_init(&ap_dev->list); |
539 | spin_unlock_bh(&ap_device_lock); | 539 | spin_unlock_bh(&ap_device_lock); |
540 | if (ap_drv->remove) | ||
541 | ap_drv->remove(ap_dev); | ||
540 | spin_lock_bh(&ap_dev->lock); | 542 | spin_lock_bh(&ap_dev->lock); |
541 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); | 543 | atomic_sub(ap_dev->queue_count, &ap_poll_requests); |
542 | spin_unlock_bh(&ap_dev->lock); | 544 | spin_unlock_bh(&ap_dev->lock); |
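The ap_bus hunks above reorder list maintenance around the driver callbacks: a device is added to ap_device_list only after probe() succeeds, and it is unlinked from the list before remove() runs, so list walkers never see a device whose driver is absent or already being torn down. A stand-alone sketch of that ordering; the names are illustrative and the list locking is omitted for brevity:

#include <stdio.h>

struct fake_dev {
	const char *name;
	struct fake_dev *next;
	int bound;
};

static struct fake_dev *device_list;	/* stands in for ap_device_list */

static int drv_probe(struct fake_dev *d)
{
	d->bound = 1;
	return 0;			/* 0 = success, as in the kernel convention */
}

static void drv_remove(struct fake_dev *d)
{
	d->bound = 0;
}

static int fake_device_probe(struct fake_dev *d)
{
	int rc = drv_probe(d);

	if (!rc) {			/* publish on the list only after success */
		d->next = device_list;
		device_list = d;
	}
	return rc;
}

static void fake_device_remove(struct fake_dev *d)
{
	struct fake_dev **p;

	for (p = &device_list; *p; p = &(*p)->next)	/* unlink first... */
		if (*p == d) {
			*p = d->next;
			break;
		}
	drv_remove(d);					/* ...then tear down */
}

int main(void)
{
	struct fake_dev card = { "card0", NULL, 0 };

	if (!fake_device_probe(&card))
		fake_device_remove(&card);
	printf("%s bound=%d, list %s\n", card.name, card.bound,
	       device_list ? "non-empty" : "empty");
	return 0;
}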
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h index 32f513b1b78a..eb8efdcefe48 100644 --- a/drivers/scsi/aic94xx/aic94xx.h +++ b/drivers/scsi/aic94xx/aic94xx.h | |||
@@ -102,6 +102,7 @@ int asd_abort_task_set(struct domain_device *, u8 *lun); | |||
102 | int asd_clear_aca(struct domain_device *, u8 *lun); | 102 | int asd_clear_aca(struct domain_device *, u8 *lun); |
103 | int asd_clear_task_set(struct domain_device *, u8 *lun); | 103 | int asd_clear_task_set(struct domain_device *, u8 *lun); |
104 | int asd_lu_reset(struct domain_device *, u8 *lun); | 104 | int asd_lu_reset(struct domain_device *, u8 *lun); |
105 | int asd_I_T_nexus_reset(struct domain_device *dev); | ||
105 | int asd_query_task(struct sas_task *); | 106 | int asd_query_task(struct sas_task *); |
106 | 107 | ||
107 | /* ---------- Adapter and Port management ---------- */ | 108 | /* ---------- Adapter and Port management ---------- */ |
diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h index 150f6706d23f..abc757559c1a 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.h +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h | |||
@@ -140,7 +140,7 @@ struct asd_ascb { | |||
140 | 140 | ||
141 | /* internally generated command */ | 141 | /* internally generated command */ |
142 | struct timer_list timer; | 142 | struct timer_list timer; |
143 | struct completion completion; | 143 | struct completion *completion; |
144 | u8 tag_valid:1; | 144 | u8 tag_valid:1; |
145 | __be16 tag; /* error recovery only */ | 145 | __be16 tag; /* error recovery only */ |
146 | 146 | ||
@@ -294,7 +294,6 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, | |||
294 | ascb->timer.function = NULL; | 294 | ascb->timer.function = NULL; |
295 | init_timer(&ascb->timer); | 295 | init_timer(&ascb->timer); |
296 | ascb->tc_index = -1; | 296 | ascb->tc_index = -1; |
297 | init_completion(&ascb->completion); | ||
298 | } | 297 | } |
299 | 298 | ||
300 | /* Must be called with the tc_index_lock held! | 299 | /* Must be called with the tc_index_lock held! |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index 5d761eb67442..88d1e731b65e 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -1003,7 +1003,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = { | |||
1003 | .lldd_abort_task_set = asd_abort_task_set, | 1003 | .lldd_abort_task_set = asd_abort_task_set, |
1004 | .lldd_clear_aca = asd_clear_aca, | 1004 | .lldd_clear_aca = asd_clear_aca, |
1005 | .lldd_clear_task_set = asd_clear_task_set, | 1005 | .lldd_clear_task_set = asd_clear_task_set, |
1006 | .lldd_I_T_nexus_reset = NULL, | 1006 | .lldd_I_T_nexus_reset = asd_I_T_nexus_reset, |
1007 | .lldd_lu_reset = asd_lu_reset, | 1007 | .lldd_lu_reset = asd_lu_reset, |
1008 | .lldd_query_task = asd_query_task, | 1008 | .lldd_query_task = asd_query_task, |
1009 | 1009 | ||
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c index 965d4bb999d9..008df9ab92a5 100644 --- a/drivers/scsi/aic94xx/aic94xx_task.c +++ b/drivers/scsi/aic94xx/aic94xx_task.c | |||
@@ -343,11 +343,13 @@ Again: | |||
343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; | 343 | task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; |
344 | task->task_state_flags |= SAS_TASK_STATE_DONE; | 344 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { | 345 | if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { |
346 | struct completion *completion = ascb->completion; | ||
346 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 347 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
347 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " | 348 | ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " |
348 | "stat 0x%x but aborted by upper layer!\n", | 349 | "stat 0x%x but aborted by upper layer!\n", |
349 | task, opcode, ts->resp, ts->stat); | 350 | task, opcode, ts->resp, ts->stat); |
350 | complete(&ascb->completion); | 351 | if (completion) |
352 | complete(completion); | ||
351 | } else { | 353 | } else { |
352 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 354 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
353 | task->lldd_task = NULL; | 355 | task->lldd_task = NULL; |
diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index 144f5ad20453..b9ac8f703a1d 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c | |||
@@ -53,50 +53,64 @@ static int asd_enqueue_internal(struct asd_ascb *ascb, | |||
53 | return res; | 53 | return res; |
54 | } | 54 | } |
55 | 55 | ||
56 | static inline void asd_timedout_common(unsigned long data) | 56 | /* ---------- CLEAR NEXUS ---------- */ |
57 | { | ||
58 | struct asd_ascb *ascb = (void *) data; | ||
59 | struct asd_seq_data *seq = &ascb->ha->seq; | ||
60 | unsigned long flags; | ||
61 | 57 | ||
62 | spin_lock_irqsave(&seq->pend_q_lock, flags); | 58 | struct tasklet_completion_status { |
63 | seq->pending--; | 59 | int dl_opcode; |
64 | list_del_init(&ascb->list); | 60 | int tmf_state; |
65 | spin_unlock_irqrestore(&seq->pend_q_lock, flags); | 61 | u8 tag_valid:1; |
66 | } | 62 | __be16 tag; |
63 | }; | ||
64 | |||
65 | #define DECLARE_TCS(tcs) \ | ||
66 | struct tasklet_completion_status tcs = { \ | ||
67 | .dl_opcode = 0, \ | ||
68 | .tmf_state = 0, \ | ||
69 | .tag_valid = 0, \ | ||
70 | .tag = 0, \ | ||
71 | } | ||
67 | 72 | ||
68 | /* ---------- CLEAR NEXUS ---------- */ | ||
69 | 73 | ||
70 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, | 74 | static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, |
71 | struct done_list_struct *dl) | 75 | struct done_list_struct *dl) |
72 | { | 76 | { |
77 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
73 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 78 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
74 | if (!del_timer(&ascb->timer)) { | 79 | if (!del_timer(&ascb->timer)) { |
75 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); | 80 | ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__); |
76 | return; | 81 | return; |
77 | } | 82 | } |
78 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); | 83 | ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode); |
79 | ascb->uldd_task = (void *) (unsigned long) dl->opcode; | 84 | tcs->dl_opcode = dl->opcode; |
80 | complete(&ascb->completion); | 85 | complete(ascb->completion); |
86 | asd_ascb_free(ascb); | ||
81 | } | 87 | } |
82 | 88 | ||
83 | static void asd_clear_nexus_timedout(unsigned long data) | 89 | static void asd_clear_nexus_timedout(unsigned long data) |
84 | { | 90 | { |
85 | struct asd_ascb *ascb = (void *) data; | 91 | struct asd_ascb *ascb = (void *)data; |
92 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
86 | 93 | ||
87 | ASD_DPRINTK("%s: here\n", __FUNCTION__); | 94 | ASD_DPRINTK("%s: here\n", __FUNCTION__); |
88 | asd_timedout_common(data); | 95 | tcs->dl_opcode = TMF_RESP_FUNC_FAILED; |
89 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 96 | complete(ascb->completion); |
90 | complete(&ascb->completion); | ||
91 | } | 97 | } |
92 | 98 | ||
93 | #define CLEAR_NEXUS_PRE \ | 99 | #define CLEAR_NEXUS_PRE \ |
100 | struct asd_ascb *ascb; \ | ||
101 | struct scb *scb; \ | ||
102 | int res; \ | ||
103 | DECLARE_COMPLETION_ONSTACK(completion); \ | ||
104 | DECLARE_TCS(tcs); \ | ||
105 | \ | ||
94 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ | 106 | ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \ |
95 | res = 1; \ | 107 | res = 1; \ |
96 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ | 108 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \ |
97 | if (!ascb) \ | 109 | if (!ascb) \ |
98 | return -ENOMEM; \ | 110 | return -ENOMEM; \ |
99 | \ | 111 | \ |
112 | ascb->completion = &completion; \ | ||
113 | ascb->uldd_task = &tcs; \ | ||
100 | scb = ascb->scb; \ | 114 | scb = ascb->scb; \ |
101 | scb->header.opcode = CLEAR_NEXUS | 115 | scb->header.opcode = CLEAR_NEXUS |
102 | 116 | ||
@@ -107,10 +121,11 @@ static void asd_clear_nexus_timedout(unsigned long data) | |||
107 | if (res) \ | 121 | if (res) \ |
108 | goto out_err; \ | 122 | goto out_err; \ |
109 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ | 123 | ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \ |
110 | wait_for_completion(&ascb->completion); \ | 124 | wait_for_completion(&completion); \ |
111 | res = (int) (unsigned long) ascb->uldd_task; \ | 125 | res = tcs.dl_opcode; \ |
112 | if (res == TC_NO_ERROR) \ | 126 | if (res == TC_NO_ERROR) \ |
113 | res = TMF_RESP_FUNC_COMPLETE; \ | 127 | res = TMF_RESP_FUNC_COMPLETE; \ |
128 | return res; \ | ||
114 | out_err: \ | 129 | out_err: \ |
115 | asd_ascb_free(ascb); \ | 130 | asd_ascb_free(ascb); \ |
116 | return res | 131 | return res |
@@ -118,9 +133,6 @@ out_err: \ | |||
118 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | 133 | int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) |
119 | { | 134 | { |
120 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; | 135 | struct asd_ha_struct *asd_ha = sas_ha->lldd_ha; |
121 | struct asd_ascb *ascb; | ||
122 | struct scb *scb; | ||
123 | int res; | ||
124 | 136 | ||
125 | CLEAR_NEXUS_PRE; | 137 | CLEAR_NEXUS_PRE; |
126 | scb->clear_nexus.nexus = NEXUS_ADAPTER; | 138 | scb->clear_nexus.nexus = NEXUS_ADAPTER; |
@@ -130,9 +142,6 @@ int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha) | |||
130 | int asd_clear_nexus_port(struct asd_sas_port *port) | 142 | int asd_clear_nexus_port(struct asd_sas_port *port) |
131 | { | 143 | { |
132 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; | 144 | struct asd_ha_struct *asd_ha = port->ha->lldd_ha; |
133 | struct asd_ascb *ascb; | ||
134 | struct scb *scb; | ||
135 | int res; | ||
136 | 145 | ||
137 | CLEAR_NEXUS_PRE; | 146 | CLEAR_NEXUS_PRE; |
138 | scb->clear_nexus.nexus = NEXUS_PORT; | 147 | scb->clear_nexus.nexus = NEXUS_PORT; |
@@ -140,29 +149,73 @@ int asd_clear_nexus_port(struct asd_sas_port *port) | |||
140 | CLEAR_NEXUS_POST; | 149 | CLEAR_NEXUS_POST; |
141 | } | 150 | } |
142 | 151 | ||
143 | #if 0 | 152 | enum clear_nexus_phase { |
144 | static int asd_clear_nexus_I_T(struct domain_device *dev) | 153 | NEXUS_PHASE_PRE, |
154 | NEXUS_PHASE_POST, | ||
155 | NEXUS_PHASE_RESUME, | ||
156 | }; | ||
157 | |||
158 | static int asd_clear_nexus_I_T(struct domain_device *dev, | ||
159 | enum clear_nexus_phase phase) | ||
145 | { | 160 | { |
146 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 161 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
147 | struct asd_ascb *ascb; | ||
148 | struct scb *scb; | ||
149 | int res; | ||
150 | 162 | ||
151 | CLEAR_NEXUS_PRE; | 163 | CLEAR_NEXUS_PRE; |
152 | scb->clear_nexus.nexus = NEXUS_I_T; | 164 | scb->clear_nexus.nexus = NEXUS_I_T; |
153 | scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ; | 165 | switch (phase) { |
166 | case NEXUS_PHASE_PRE: | ||
167 | scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX; | ||
168 | break; | ||
169 | case NEXUS_PHASE_POST: | ||
170 | scb->clear_nexus.flags = SEND_Q | NOTINQ; | ||
171 | break; | ||
172 | case NEXUS_PHASE_RESUME: | ||
173 | scb->clear_nexus.flags = RESUME_TX; | ||
174 | } | ||
154 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) | 175 | scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long) |
155 | dev->lldd_dev); | 176 | dev->lldd_dev); |
156 | CLEAR_NEXUS_POST; | 177 | CLEAR_NEXUS_POST; |
157 | } | 178 | } |
158 | #endif | 179 | |
180 | int asd_I_T_nexus_reset(struct domain_device *dev) | ||
181 | { | ||
182 | int res, tmp_res, i; | ||
183 | struct sas_phy *phy = sas_find_local_phy(dev); | ||
184 | /* Standard mandates link reset for ATA (type 0) and | ||
185 | * hard reset for SSP (type 1) */ | ||
186 | int reset_type = (dev->dev_type == SATA_DEV || | ||
187 | (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; | ||
188 | |||
189 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); | ||
190 | /* send a hard reset */ | ||
191 | ASD_DPRINTK("sending %s reset to %s\n", | ||
192 | reset_type ? "hard" : "soft", phy->dev.bus_id); | ||
193 | res = sas_phy_reset(phy, reset_type); | ||
194 | if (res == TMF_RESP_FUNC_COMPLETE) { | ||
195 | /* wait for the maximum settle time */ | ||
196 | msleep(500); | ||
197 | /* clear all outstanding commands (keep nexus suspended) */ | ||
198 | asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST); | ||
199 | } | ||
200 | for (i = 0; i < 3; i++) { | ||
201 | tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME); | ||
202 | if (tmp_res == TC_RESUME) | ||
203 | return res; | ||
204 | msleep(500); | ||
205 | } | ||
206 | |||
207 | /* This is a bit of a problem: the sequencer is still suspended | ||
208 | * and is refusing to resume.  Hopefully a bigger hammer will bring | ||
209 | * it back; otherwise the disk is lost. */ | ||
210 | dev_printk(KERN_ERR, &phy->dev, | ||
211 | "Failed to resume nexus after reset 0x%x\n", tmp_res); | ||
212 | |||
213 | return TMF_RESP_FUNC_FAILED; | ||
214 | } | ||
159 | 215 | ||
160 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) | 216 | static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun) |
161 | { | 217 | { |
162 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; | 218 | struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; |
163 | struct asd_ascb *ascb; | ||
164 | struct scb *scb; | ||
165 | int res; | ||
166 | 219 | ||
167 | CLEAR_NEXUS_PRE; | 220 | CLEAR_NEXUS_PRE; |
168 | scb->clear_nexus.nexus = NEXUS_I_T_L; | 221 | scb->clear_nexus.nexus = NEXUS_I_T_L; |
@@ -177,9 +230,6 @@ static int asd_clear_nexus_tag(struct sas_task *task) | |||
177 | { | 230 | { |
178 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 231 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
179 | struct asd_ascb *tascb = task->lldd_task; | 232 | struct asd_ascb *tascb = task->lldd_task; |
180 | struct asd_ascb *ascb; | ||
181 | struct scb *scb; | ||
182 | int res; | ||
183 | 233 | ||
184 | CLEAR_NEXUS_PRE; | 234 | CLEAR_NEXUS_PRE; |
185 | scb->clear_nexus.nexus = NEXUS_TAG; | 235 | scb->clear_nexus.nexus = NEXUS_TAG; |
@@ -195,9 +245,6 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
195 | { | 245 | { |
196 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; | 246 | struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; |
197 | struct asd_ascb *tascb = task->lldd_task; | 247 | struct asd_ascb *tascb = task->lldd_task; |
198 | struct asd_ascb *ascb; | ||
199 | struct scb *scb; | ||
200 | int res; | ||
201 | 248 | ||
202 | CLEAR_NEXUS_PRE; | 249 | CLEAR_NEXUS_PRE; |
203 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; | 250 | scb->clear_nexus.nexus = NEXUS_TRANS_CX; |
@@ -213,11 +260,11 @@ static int asd_clear_nexus_index(struct sas_task *task) | |||
213 | static void asd_tmf_timedout(unsigned long data) | 260 | static void asd_tmf_timedout(unsigned long data) |
214 | { | 261 | { |
215 | struct asd_ascb *ascb = (void *) data; | 262 | struct asd_ascb *ascb = (void *) data; |
263 | struct tasklet_completion_status *tcs = ascb->uldd_task; | ||
216 | 264 | ||
217 | ASD_DPRINTK("tmf timed out\n"); | 265 | ASD_DPRINTK("tmf timed out\n"); |
218 | asd_timedout_common(data); | 266 | tcs->tmf_state = TMF_RESP_FUNC_FAILED; |
219 | ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED; | 267 | complete(ascb->completion); |
220 | complete(&ascb->completion); | ||
221 | } | 268 | } |
222 | 269 | ||
223 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | 270 | static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, |
@@ -269,18 +316,24 @@ static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb, | |||
269 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, | 316 | static void asd_tmf_tasklet_complete(struct asd_ascb *ascb, |
270 | struct done_list_struct *dl) | 317 | struct done_list_struct *dl) |
271 | { | 318 | { |
319 | struct tasklet_completion_status *tcs; | ||
320 | |||
272 | if (!del_timer(&ascb->timer)) | 321 | if (!del_timer(&ascb->timer)) |
273 | return; | 322 | return; |
274 | 323 | ||
324 | tcs = ascb->uldd_task; | ||
275 | ASD_DPRINTK("tmf tasklet complete\n"); | 325 | ASD_DPRINTK("tmf tasklet complete\n"); |
276 | 326 | ||
277 | if (dl->opcode == TC_SSP_RESP) | 327 | tcs->dl_opcode = dl->opcode; |
278 | ascb->uldd_task = (void *) (unsigned long) | 328 | |
279 | asd_get_tmf_resp_tasklet(ascb, dl); | 329 | if (dl->opcode == TC_SSP_RESP) { |
280 | else | 330 | tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl); |
281 | ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode; | 331 | tcs->tag_valid = ascb->tag_valid; |
332 | tcs->tag = ascb->tag; | ||
333 | } | ||
282 | 334 | ||
283 | complete(&ascb->completion); | 335 | complete(ascb->completion); |
336 | asd_ascb_free(ascb); | ||
284 | } | 337 | } |
285 | 338 | ||
286 | static inline int asd_clear_nexus(struct sas_task *task) | 339 | static inline int asd_clear_nexus(struct sas_task *task) |
@@ -288,15 +341,19 @@ static inline int asd_clear_nexus(struct sas_task *task) | |||
288 | int res = TMF_RESP_FUNC_FAILED; | 341 | int res = TMF_RESP_FUNC_FAILED; |
289 | int leftover; | 342 | int leftover; |
290 | struct asd_ascb *tascb = task->lldd_task; | 343 | struct asd_ascb *tascb = task->lldd_task; |
344 | DECLARE_COMPLETION_ONSTACK(completion); | ||
291 | unsigned long flags; | 345 | unsigned long flags; |
292 | 346 | ||
347 | tascb->completion = &completion; | ||
348 | |||
293 | ASD_DPRINTK("task not done, clearing nexus\n"); | 349 | ASD_DPRINTK("task not done, clearing nexus\n"); |
294 | if (tascb->tag_valid) | 350 | if (tascb->tag_valid) |
295 | res = asd_clear_nexus_tag(task); | 351 | res = asd_clear_nexus_tag(task); |
296 | else | 352 | else |
297 | res = asd_clear_nexus_index(task); | 353 | res = asd_clear_nexus_index(task); |
298 | leftover = wait_for_completion_timeout(&tascb->completion, | 354 | leftover = wait_for_completion_timeout(&completion, |
299 | AIC94XX_SCB_TIMEOUT); | 355 | AIC94XX_SCB_TIMEOUT); |
356 | tascb->completion = NULL; | ||
300 | ASD_DPRINTK("came back from clear nexus\n"); | 357 | ASD_DPRINTK("came back from clear nexus\n"); |
301 | spin_lock_irqsave(&task->task_state_lock, flags); | 358 | spin_lock_irqsave(&task->task_state_lock, flags); |
302 | if (leftover < 1) | 359 | if (leftover < 1) |
@@ -350,6 +407,11 @@ int asd_abort_task(struct sas_task *task) | |||
350 | struct asd_ascb *ascb = NULL; | 407 | struct asd_ascb *ascb = NULL; |
351 | struct scb *scb; | 408 | struct scb *scb; |
352 | int leftover; | 409 | int leftover; |
410 | DECLARE_TCS(tcs); | ||
411 | DECLARE_COMPLETION_ONSTACK(completion); | ||
412 | DECLARE_COMPLETION_ONSTACK(tascb_completion); | ||
413 | |||
414 | tascb->completion = &tascb_completion; | ||
353 | 415 | ||
354 | spin_lock_irqsave(&task->task_state_lock, flags); | 416 | spin_lock_irqsave(&task->task_state_lock, flags); |
355 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 417 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -363,8 +425,10 @@ int asd_abort_task(struct sas_task *task) | |||
363 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 425 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
364 | if (!ascb) | 426 | if (!ascb) |
365 | return -ENOMEM; | 427 | return -ENOMEM; |
366 | scb = ascb->scb; | ||
367 | 428 | ||
429 | ascb->uldd_task = &tcs; | ||
430 | ascb->completion = &completion; | ||
431 | scb = ascb->scb; | ||
368 | scb->header.opcode = SCB_ABORT_TASK; | 432 | scb->header.opcode = SCB_ABORT_TASK; |
369 | 433 | ||
370 | switch (task->task_proto) { | 434 | switch (task->task_proto) { |
@@ -406,13 +470,12 @@ int asd_abort_task(struct sas_task *task) | |||
406 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, | 470 | res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete, |
407 | asd_tmf_timedout); | 471 | asd_tmf_timedout); |
408 | if (res) | 472 | if (res) |
409 | goto out; | 473 | goto out_free; |
410 | wait_for_completion(&ascb->completion); | 474 | wait_for_completion(&completion); |
411 | ASD_DPRINTK("tmf came back\n"); | 475 | ASD_DPRINTK("tmf came back\n"); |
412 | 476 | ||
413 | res = (int) (unsigned long) ascb->uldd_task; | 477 | tascb->tag = tcs.tag; |
414 | tascb->tag = ascb->tag; | 478 | tascb->tag_valid = tcs.tag_valid; |
415 | tascb->tag_valid = ascb->tag_valid; | ||
416 | 479 | ||
417 | spin_lock_irqsave(&task->task_state_lock, flags); | 480 | spin_lock_irqsave(&task->task_state_lock, flags); |
418 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { | 481 | if (task->task_state_flags & SAS_TASK_STATE_DONE) { |
@@ -423,63 +486,68 @@ int asd_abort_task(struct sas_task *task) | |||
423 | } | 486 | } |
424 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 487 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
425 | 488 | ||
426 | switch (res) { | 489 | if (tcs.dl_opcode == TC_SSP_RESP) { |
427 | /* The task to be aborted has been sent to the device. | 490 | /* The task to be aborted has been sent to the device. |
428 | * We got a Response IU for the ABORT TASK TMF. */ | 491 | * We got a Response IU for the ABORT TASK TMF. */ |
429 | case TC_NO_ERROR + 0xFF00: | 492 | if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE) |
430 | case TMF_RESP_FUNC_COMPLETE: | 493 | res = asd_clear_nexus(task); |
431 | case TMF_RESP_FUNC_FAILED: | 494 | else |
432 | res = asd_clear_nexus(task); | 495 | res = tcs.tmf_state; |
433 | break; | 496 | } else if (tcs.dl_opcode == TC_NO_ERROR && |
434 | case TMF_RESP_INVALID_FRAME: | 497 | tcs.tmf_state == TMF_RESP_FUNC_FAILED) { |
435 | case TMF_RESP_OVERLAPPED_TAG: | 498 | /* timeout */ |
436 | case TMF_RESP_FUNC_ESUPP: | ||
437 | case TMF_RESP_NO_LUN: | ||
438 | goto out_done; break; | ||
439 | } | ||
440 | /* In the following we assume that the managing layer | ||
441 | * will _never_ make a mistake, when issuing ABORT TASK. | ||
442 | */ | ||
443 | switch (res) { | ||
444 | default: | ||
445 | res = asd_clear_nexus(task); | ||
446 | /* fallthrough */ | ||
447 | case TC_NO_ERROR + 0xFF00: | ||
448 | case TMF_RESP_FUNC_COMPLETE: | ||
449 | break; | ||
450 | /* The task hasn't been sent to the device xor we never got | ||
451 | * a (sane) Response IU for the ABORT TASK TMF. | ||
452 | */ | ||
453 | case TF_NAK_RECV + 0xFF00: | ||
454 | res = TMF_RESP_INVALID_FRAME; | ||
455 | break; | ||
456 | case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */ | ||
457 | res = TMF_RESP_FUNC_FAILED; | 499 | res = TMF_RESP_FUNC_FAILED; |
458 | leftover = wait_for_completion_timeout(&tascb->completion, | 500 | } else { |
459 | AIC94XX_SCB_TIMEOUT); | 501 | /* In the following we assume that the managing layer |
460 | spin_lock_irqsave(&task->task_state_lock, flags); | 502 | * will _never_ make a mistake, when issuing ABORT |
461 | if (leftover < 1) | 503 | * TASK. |
504 | */ | ||
505 | switch (tcs.dl_opcode) { | ||
506 | default: | ||
507 | res = asd_clear_nexus(task); | ||
508 | /* fallthrough */ | ||
509 | case TC_NO_ERROR: | ||
510 | break; | ||
511 | /* The task hasn't been sent to the device xor | ||
512 | * we never got a (sane) Response IU for the | ||
513 | * ABORT TASK TMF. | ||
514 | */ | ||
515 | case TF_NAK_RECV: | ||
516 | res = TMF_RESP_INVALID_FRAME; | ||
517 | break; | ||
518 | case TF_TMF_TASK_DONE: /* done but not reported yet */ | ||
462 | res = TMF_RESP_FUNC_FAILED; | 519 | res = TMF_RESP_FUNC_FAILED; |
463 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | 520 | leftover = |
521 | wait_for_completion_timeout(&tascb_completion, | ||
522 | AIC94XX_SCB_TIMEOUT); | ||
523 | spin_lock_irqsave(&task->task_state_lock, flags); | ||
524 | if (leftover < 1) | ||
525 | res = TMF_RESP_FUNC_FAILED; | ||
526 | if (task->task_state_flags & SAS_TASK_STATE_DONE) | ||
527 | res = TMF_RESP_FUNC_COMPLETE; | ||
528 | spin_unlock_irqrestore(&task->task_state_lock, flags); | ||
529 | break; | ||
530 | case TF_TMF_NO_TAG: | ||
531 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ | ||
532 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ | ||
464 | res = TMF_RESP_FUNC_COMPLETE; | 533 | res = TMF_RESP_FUNC_COMPLETE; |
465 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 534 | break; |
466 | goto out_done; | 535 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
467 | case TF_TMF_NO_TAG + 0xFF00: | 536 | res = TMF_RESP_FUNC_ESUPP; |
468 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 537 | break; |
469 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 538 | } |
470 | res = TMF_RESP_FUNC_COMPLETE; | ||
471 | goto out_done; | ||
472 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | ||
473 | res = TMF_RESP_FUNC_ESUPP; | ||
474 | goto out; | ||
475 | } | 539 | } |
476 | out_done: | 540 | out_done: |
541 | tascb->completion = NULL; | ||
477 | if (res == TMF_RESP_FUNC_COMPLETE) { | 542 | if (res == TMF_RESP_FUNC_COMPLETE) { |
478 | task->lldd_task = NULL; | 543 | task->lldd_task = NULL; |
479 | mb(); | 544 | mb(); |
480 | asd_ascb_free(tascb); | 545 | asd_ascb_free(tascb); |
481 | } | 546 | } |
482 | out: | 547 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
548 | return res; | ||
549 | |||
550 | out_free: | ||
483 | asd_ascb_free(ascb); | 551 | asd_ascb_free(ascb); |
484 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); | 552 | ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res); |
485 | return res; | 553 | return res; |
@@ -507,6 +575,8 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
507 | struct asd_ascb *ascb; | 575 | struct asd_ascb *ascb; |
508 | int res = 1; | 576 | int res = 1; |
509 | struct scb *scb; | 577 | struct scb *scb; |
578 | DECLARE_COMPLETION_ONSTACK(completion); | ||
579 | DECLARE_TCS(tcs); | ||
510 | 580 | ||
511 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) | 581 | if (!(dev->tproto & SAS_PROTOCOL_SSP)) |
512 | return TMF_RESP_FUNC_ESUPP; | 582 | return TMF_RESP_FUNC_ESUPP; |
@@ -514,6 +584,9 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
514 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); | 584 | ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); |
515 | if (!ascb) | 585 | if (!ascb) |
516 | return -ENOMEM; | 586 | return -ENOMEM; |
587 | |||
588 | ascb->completion = &completion; | ||
589 | ascb->uldd_task = &tcs; | ||
517 | scb = ascb->scb; | 590 | scb = ascb->scb; |
518 | 591 | ||
519 | if (tmf == TMF_QUERY_TASK) | 592 | if (tmf == TMF_QUERY_TASK) |
@@ -546,31 +619,32 @@ static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun, | |||
546 | asd_tmf_timedout); | 619 | asd_tmf_timedout); |
547 | if (res) | 620 | if (res) |
548 | goto out_err; | 621 | goto out_err; |
549 | wait_for_completion(&ascb->completion); | 622 | wait_for_completion(&completion); |
550 | res = (int) (unsigned long) ascb->uldd_task; | ||
551 | 623 | ||
552 | switch (res) { | 624 | switch (tcs.dl_opcode) { |
553 | case TC_NO_ERROR + 0xFF00: | 625 | case TC_NO_ERROR: |
554 | res = TMF_RESP_FUNC_COMPLETE; | 626 | res = TMF_RESP_FUNC_COMPLETE; |
555 | break; | 627 | break; |
556 | case TF_NAK_RECV + 0xFF00: | 628 | case TF_NAK_RECV: |
557 | res = TMF_RESP_INVALID_FRAME; | 629 | res = TMF_RESP_INVALID_FRAME; |
558 | break; | 630 | break; |
559 | case TF_TMF_TASK_DONE + 0xFF00: | 631 | case TF_TMF_TASK_DONE: |
560 | res = TMF_RESP_FUNC_FAILED; | 632 | res = TMF_RESP_FUNC_FAILED; |
561 | break; | 633 | break; |
562 | case TF_TMF_NO_TAG + 0xFF00: | 634 | case TF_TMF_NO_TAG: |
563 | case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */ | 635 | case TF_TMF_TAG_FREE: /* the tag is in the free list */ |
564 | case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */ | 636 | case TF_TMF_NO_CONN_HANDLE: /* no such device */ |
565 | res = TMF_RESP_FUNC_COMPLETE; | 637 | res = TMF_RESP_FUNC_COMPLETE; |
566 | break; | 638 | break; |
567 | case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */ | 639 | case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */ |
568 | res = TMF_RESP_FUNC_ESUPP; | 640 | res = TMF_RESP_FUNC_ESUPP; |
569 | break; | 641 | break; |
570 | default: | 642 | default: |
571 | /* Allow TMF response codes to propagate upwards */ | 643 | /* Allow TMF response codes to propagate upwards */ |
644 | res = tcs.dl_opcode; | ||
572 | break; | 645 | break; |
573 | } | 646 | } |
647 | return res; | ||
574 | out_err: | 648 | out_err: |
575 | asd_ascb_free(ascb); | 649 | asd_ascb_free(ascb); |
576 | return res; | 650 | return res; |
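The aic94xx changes above drop the completion embedded in struct asd_ascb in favour of a pointer that each waiter aims at a DECLARE_COMPLETION_ONSTACK object, with the result codes collected in a tasklet_completion_status on the waiter's stack; the tasklet can then free the ascb itself while the waiter keeps ownership of everything it still needs to read. A user-space analogue of that hand-off, built on pthreads with illustrative names:

#include <pthread.h>
#include <stdio.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

#define DECLARE_COMPLETION_ONSTACK(c) \
	struct completion c = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* stand-in for the ascb: it carries only a pointer to the waiter's
 * completion plus the status the tasklet reports back */
struct fake_ascb {
	struct completion *completion;
	int dl_opcode;
};

static void *fake_tasklet(void *arg)
{
	struct fake_ascb *ascb = arg;

	ascb->dl_opcode = 0;		/* pretend TC_NO_ERROR */
	if (ascb->completion)		/* may be NULL when nobody waits */
		complete(ascb->completion);
	return NULL;
}

int main(void)
{
	struct fake_ascb ascb = { .completion = NULL, .dl_opcode = -1 };
	DECLARE_COMPLETION_ONSTACK(done);
	pthread_t t;

	ascb.completion = &done;	/* waiter wires in its own stack object */
	pthread_create(&t, NULL, fake_tasklet, &ascb);
	wait_for_completion(&done);
	ascb.completion = NULL;		/* detach before the frame goes away */
	pthread_join(t, NULL);
	printf("dl_opcode = %d\n", ascb.dl_opcode);
	return 0;
}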
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h index 57786502e3ec..0393707bdfce 100644 --- a/drivers/scsi/arcmsr/arcmsr.h +++ b/drivers/scsi/arcmsr/arcmsr.h | |||
@@ -48,7 +48,7 @@ struct class_device_attribute; | |||
48 | /*The limit of outstanding scsi command that firmware can handle*/ | 48 | /*The limit of outstanding scsi command that firmware can handle*/ |
49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 | 49 | #define ARCMSR_MAX_OUTSTANDING_CMD 256 |
50 | #define ARCMSR_MAX_FREECCB_NUM 320 | 50 | #define ARCMSR_MAX_FREECCB_NUM 320 |
51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2007/12/24" | 51 | #define ARCMSR_DRIVER_VERSION "Driver Version 1.20.00.15 2008/02/27" |
52 | #define ARCMSR_SCSI_INITIATOR_ID 255 | 52 | #define ARCMSR_SCSI_INITIATOR_ID 255 |
53 | #define ARCMSR_MAX_XFER_SECTORS 512 | 53 | #define ARCMSR_MAX_XFER_SECTORS 512 |
54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 | 54 | #define ARCMSR_MAX_XFER_SECTORS_B 4096 |
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 6d67f5c0eb8e..27ebd336409b 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -160,7 +160,7 @@ static void gdth_readapp_event(gdth_ha_str *ha, unchar application, | |||
160 | static void gdth_clear_events(void); | 160 | static void gdth_clear_events(void); |
161 | 161 | ||
162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 162 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
163 | char *buffer, ushort count, int to_buffer); | 163 | char *buffer, ushort count); |
164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); | 164 | static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp); |
165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); | 165 | static int gdth_fill_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, ushort hdrive); |
166 | 166 | ||
@@ -182,7 +182,6 @@ static int gdth_ioctl(struct inode *inode, struct file *filep, | |||
182 | unsigned int cmd, unsigned long arg); | 182 | unsigned int cmd, unsigned long arg); |
183 | 183 | ||
184 | static void gdth_flush(gdth_ha_str *ha); | 184 | static void gdth_flush(gdth_ha_str *ha); |
185 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf); | ||
186 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); | 185 | static int gdth_queuecommand(Scsi_Cmnd *scp,void (*done)(Scsi_Cmnd *)); |
187 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, | 186 | static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp, |
188 | struct gdth_cmndinfo *cmndinfo); | 187 | struct gdth_cmndinfo *cmndinfo); |
@@ -417,12 +416,6 @@ static inline void gdth_set_sglist(struct scsi_cmnd *cmd, | |||
417 | #include "gdth_proc.h" | 416 | #include "gdth_proc.h" |
418 | #include "gdth_proc.c" | 417 | #include "gdth_proc.c" |
419 | 418 | ||
420 | /* notifier block to get a notify on system shutdown/halt/reboot */ | ||
421 | static struct notifier_block gdth_notifier = { | ||
422 | gdth_halt, NULL, 0 | ||
423 | }; | ||
424 | static int notifier_disabled = 0; | ||
425 | |||
426 | static gdth_ha_str *gdth_find_ha(int hanum) | 419 | static gdth_ha_str *gdth_find_ha(int hanum) |
427 | { | 420 | { |
428 | gdth_ha_str *ha; | 421 | gdth_ha_str *ha; |
@@ -445,8 +438,8 @@ static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha) | |||
445 | for (i=0; i<GDTH_MAXCMDS; ++i) { | 438 | for (i=0; i<GDTH_MAXCMDS; ++i) { |
446 | if (ha->cmndinfo[i].index == 0) { | 439 | if (ha->cmndinfo[i].index == 0) { |
447 | priv = &ha->cmndinfo[i]; | 440 | priv = &ha->cmndinfo[i]; |
448 | priv->index = i+1; | ||
449 | memset(priv, 0, sizeof(*priv)); | 441 | memset(priv, 0, sizeof(*priv)); |
442 | priv->index = i+1; | ||
450 | break; | 443 | break; |
451 | } | 444 | } |
452 | } | 445 | } |
@@ -493,7 +486,6 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
493 | gdth_ha_str *ha = shost_priv(sdev->host); | 486 | gdth_ha_str *ha = shost_priv(sdev->host); |
494 | Scsi_Cmnd *scp; | 487 | Scsi_Cmnd *scp; |
495 | struct gdth_cmndinfo cmndinfo; | 488 | struct gdth_cmndinfo cmndinfo; |
496 | struct scatterlist one_sg; | ||
497 | DECLARE_COMPLETION_ONSTACK(wait); | 489 | DECLARE_COMPLETION_ONSTACK(wait); |
498 | int rval; | 490 | int rval; |
499 | 491 | ||
@@ -507,13 +499,10 @@ int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd, char *cmnd, | |||
507 | /* use request field to save the ptr. to completion struct. */ | 499 | /* use request field to save the ptr. to completion struct. */ |
508 | scp->request = (struct request *)&wait; | 500 | scp->request = (struct request *)&wait; |
509 | scp->timeout_per_command = timeout*HZ; | 501 | scp->timeout_per_command = timeout*HZ; |
510 | sg_init_one(&one_sg, gdtcmd, sizeof(*gdtcmd)); | ||
511 | gdth_set_sglist(scp, &one_sg); | ||
512 | gdth_set_sg_count(scp, 1); | ||
513 | gdth_set_bufflen(scp, sizeof(*gdtcmd)); | ||
514 | scp->cmd_len = 12; | 502 | scp->cmd_len = 12; |
515 | memcpy(scp->cmnd, cmnd, 12); | 503 | memcpy(scp->cmnd, cmnd, 12); |
516 | cmndinfo.priority = IOCTL_PRI; | 504 | cmndinfo.priority = IOCTL_PRI; |
505 | cmndinfo.internal_cmd_str = gdtcmd; | ||
517 | cmndinfo.internal_command = 1; | 506 | cmndinfo.internal_command = 1; |
518 | 507 | ||
519 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); | 508 | TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0])); |
@@ -2355,7 +2344,7 @@ static void gdth_next(gdth_ha_str *ha) | |||
2355 | * buffers, kmap_atomic() as needed. | 2344 | * buffers, kmap_atomic() as needed. |
2356 | */ | 2345 | */ |
2357 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | 2346 | static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, |
2358 | char *buffer, ushort count, int to_buffer) | 2347 | char *buffer, ushort count) |
2359 | { | 2348 | { |
2360 | ushort cpcount,i, max_sg = gdth_sg_count(scp); | 2349 | ushort cpcount,i, max_sg = gdth_sg_count(scp); |
2361 | ushort cpsum,cpnow; | 2350 | ushort cpsum,cpnow; |
@@ -2381,10 +2370,7 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp, | |||
2381 | } | 2370 | } |
2382 | local_irq_save(flags); | 2371 | local_irq_save(flags); |
2383 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; | 2372 | address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; |
2384 | if (to_buffer) | 2373 | memcpy(address, buffer, cpnow); |
2385 | memcpy(buffer, address, cpnow); | ||
2386 | else | ||
2387 | memcpy(address, buffer, cpnow); | ||
2388 | flush_dcache_page(sg_page(sl)); | 2374 | flush_dcache_page(sg_page(sl)); |
2389 | kunmap_atomic(address, KM_BIO_SRC_IRQ); | 2375 | kunmap_atomic(address, KM_BIO_SRC_IRQ); |
2390 | local_irq_restore(flags); | 2376 | local_irq_restore(flags); |
@@ -2438,7 +2424,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2438 | strcpy(inq.vendor,ha->oem_name); | 2424 | strcpy(inq.vendor,ha->oem_name); |
2439 | sprintf(inq.product,"Host Drive #%02d",t); | 2425 | sprintf(inq.product,"Host Drive #%02d",t); |
2440 | strcpy(inq.revision," "); | 2426 | strcpy(inq.revision," "); |
2441 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data), 0); | 2427 | gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data)); |
2442 | break; | 2428 | break; |
2443 | 2429 | ||
2444 | case REQUEST_SENSE: | 2430 | case REQUEST_SENSE: |
@@ -2448,7 +2434,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2448 | sd.key = NO_SENSE; | 2434 | sd.key = NO_SENSE; |
2449 | sd.info = 0; | 2435 | sd.info = 0; |
2450 | sd.add_length= 0; | 2436 | sd.add_length= 0; |
2451 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data), 0); | 2437 | gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data)); |
2452 | break; | 2438 | break; |
2453 | 2439 | ||
2454 | case MODE_SENSE: | 2440 | case MODE_SENSE: |
@@ -2460,7 +2446,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2460 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; | 2446 | mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16; |
2461 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; | 2447 | mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8; |
2462 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); | 2448 | mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff); |
2463 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data), 0); | 2449 | gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data)); |
2464 | break; | 2450 | break; |
2465 | 2451 | ||
2466 | case READ_CAPACITY: | 2452 | case READ_CAPACITY: |
@@ -2470,7 +2456,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2470 | else | 2456 | else |
2471 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); | 2457 | rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1); |
2472 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); | 2458 | rdc.block_length = cpu_to_be32(SECTOR_SIZE); |
2473 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data), 0); | 2459 | gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data)); |
2474 | break; | 2460 | break; |
2475 | 2461 | ||
2476 | case SERVICE_ACTION_IN: | 2462 | case SERVICE_ACTION_IN: |
@@ -2482,7 +2468,7 @@ static int gdth_internal_cache_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2482 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); | 2468 | rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1); |
2483 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); | 2469 | rdc16.block_length = cpu_to_be32(SECTOR_SIZE); |
2484 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, | 2470 | gdth_copy_internal_data(ha, scp, (char*)&rdc16, |
2485 | sizeof(gdth_rdcap16_data), 0); | 2471 | sizeof(gdth_rdcap16_data)); |
2486 | } else { | 2472 | } else { |
2487 | scp->result = DID_ABORT << 16; | 2473 | scp->result = DID_ABORT << 16; |
2488 | } | 2474 | } |
@@ -2852,6 +2838,7 @@ static int gdth_fill_raw_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp, unchar b) | |||
2852 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | 2838 | static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) |
2853 | { | 2839 | { |
2854 | register gdth_cmd_str *cmdp; | 2840 | register gdth_cmd_str *cmdp; |
2841 | struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp); | ||
2855 | int cmd_index; | 2842 | int cmd_index; |
2856 | 2843 | ||
2857 | cmdp= ha->pccb; | 2844 | cmdp= ha->pccb; |
@@ -2860,7 +2847,7 @@ static int gdth_special_cmd(gdth_ha_str *ha, Scsi_Cmnd *scp) | |||
2860 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) | 2847 | if (ha->type==GDT_EISA && ha->cmd_cnt>0) |
2861 | return 0; | 2848 | return 0; |
2862 | 2849 | ||
2863 | gdth_copy_internal_data(ha, scp, (char *)cmdp, sizeof(gdth_cmd_str), 1); | 2850 | *cmdp = *cmndinfo->internal_cmd_str; |
2864 | cmdp->RequestBuffer = scp; | 2851 | cmdp->RequestBuffer = scp; |
2865 | 2852 | ||
2866 | /* search free command index */ | 2853 | /* search free command index */ |
@@ -3794,6 +3781,8 @@ static void gdth_timeout(ulong data) | |||
3794 | gdth_ha_str *ha; | 3781 | gdth_ha_str *ha; |
3795 | ulong flags; | 3782 | ulong flags; |
3796 | 3783 | ||
3784 | BUG_ON(list_empty(&gdth_instances)); | ||
3785 | |||
3797 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); | 3786 | ha = list_first_entry(&gdth_instances, gdth_ha_str, list); |
3798 | spin_lock_irqsave(&ha->smp_lock, flags); | 3787 | spin_lock_irqsave(&ha->smp_lock, flags); |
3799 | 3788 | ||
@@ -4669,45 +4658,6 @@ static void gdth_flush(gdth_ha_str *ha) | |||
4669 | } | 4658 | } |
4670 | } | 4659 | } |
4671 | 4660 | ||
4672 | /* shutdown routine */ | ||
4673 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
4674 | { | ||
4675 | gdth_ha_str *ha; | ||
4676 | #ifndef __alpha__ | ||
4677 | gdth_cmd_str gdtcmd; | ||
4678 | char cmnd[MAX_COMMAND_SIZE]; | ||
4679 | #endif | ||
4680 | |||
4681 | if (notifier_disabled) | ||
4682 | return NOTIFY_OK; | ||
4683 | |||
4684 | TRACE2(("gdth_halt() event %d\n",(int)event)); | ||
4685 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
4686 | return NOTIFY_DONE; | ||
4687 | |||
4688 | notifier_disabled = 1; | ||
4689 | printk("GDT-HA: Flushing all host drives .. "); | ||
4690 | list_for_each_entry(ha, &gdth_instances, list) { | ||
4691 | gdth_flush(ha); | ||
4692 | |||
4693 | #ifndef __alpha__ | ||
4694 | /* controller reset */ | ||
4695 | memset(cmnd, 0xff, MAX_COMMAND_SIZE); | ||
4696 | gdtcmd.BoardNode = LOCALBOARD; | ||
4697 | gdtcmd.Service = CACHESERVICE; | ||
4698 | gdtcmd.OpCode = GDT_RESET; | ||
4699 | TRACE2(("gdth_halt(): reset controller %d\n", ha->hanum)); | ||
4700 | gdth_execute(ha->shost, &gdtcmd, cmnd, 10, NULL); | ||
4701 | #endif | ||
4702 | } | ||
4703 | printk("Done.\n"); | ||
4704 | |||
4705 | #ifdef GDTH_STATISTICS | ||
4706 | del_timer(&gdth_timer); | ||
4707 | #endif | ||
4708 | return NOTIFY_OK; | ||
4709 | } | ||
4710 | |||
4711 | /* configure lun */ | 4661 | /* configure lun */ |
4712 | static int gdth_slave_configure(struct scsi_device *sdev) | 4662 | static int gdth_slave_configure(struct scsi_device *sdev) |
4713 | { | 4663 | { |
@@ -5142,13 +5092,13 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5142 | 5092 | ||
5143 | scsi_remove_host(shp); | 5093 | scsi_remove_host(shp); |
5144 | 5094 | ||
5095 | gdth_flush(ha); | ||
5096 | |||
5145 | if (ha->sdev) { | 5097 | if (ha->sdev) { |
5146 | scsi_free_host_dev(ha->sdev); | 5098 | scsi_free_host_dev(ha->sdev); |
5147 | ha->sdev = NULL; | 5099 | ha->sdev = NULL; |
5148 | } | 5100 | } |
5149 | 5101 | ||
5150 | gdth_flush(ha); | ||
5151 | |||
5152 | if (shp->irq) | 5102 | if (shp->irq) |
5153 | free_irq(shp->irq,ha); | 5103 | free_irq(shp->irq,ha); |
5154 | 5104 | ||
@@ -5174,6 +5124,24 @@ static void gdth_remove_one(gdth_ha_str *ha) | |||
5174 | scsi_host_put(shp); | 5124 | scsi_host_put(shp); |
5175 | } | 5125 | } |
5176 | 5126 | ||
5127 | static int gdth_halt(struct notifier_block *nb, ulong event, void *buf) | ||
5128 | { | ||
5129 | gdth_ha_str *ha; | ||
5130 | |||
5131 | TRACE2(("gdth_halt() event %d\n", (int)event)); | ||
5132 | if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF) | ||
5133 | return NOTIFY_DONE; | ||
5134 | |||
5135 | list_for_each_entry(ha, &gdth_instances, list) | ||
5136 | gdth_flush(ha); | ||
5137 | |||
5138 | return NOTIFY_OK; | ||
5139 | } | ||
5140 | |||
5141 | static struct notifier_block gdth_notifier = { | ||
5142 | gdth_halt, NULL, 0 | ||
5143 | }; | ||
5144 | |||
5177 | static int __init gdth_init(void) | 5145 | static int __init gdth_init(void) |
5178 | { | 5146 | { |
5179 | if (disable) { | 5147 | if (disable) { |
@@ -5236,7 +5204,6 @@ static int __init gdth_init(void) | |||
5236 | add_timer(&gdth_timer); | 5204 | add_timer(&gdth_timer); |
5237 | #endif | 5205 | #endif |
5238 | major = register_chrdev(0,"gdth", &gdth_fops); | 5206 | major = register_chrdev(0,"gdth", &gdth_fops); |
5239 | notifier_disabled = 0; | ||
5240 | register_reboot_notifier(&gdth_notifier); | 5207 | register_reboot_notifier(&gdth_notifier); |
5241 | gdth_polling = FALSE; | 5208 | gdth_polling = FALSE; |
5242 | return 0; | 5209 | return 0; |
@@ -5246,14 +5213,15 @@ static void __exit gdth_exit(void) | |||
5246 | { | 5213 | { |
5247 | gdth_ha_str *ha; | 5214 | gdth_ha_str *ha; |
5248 | 5215 | ||
5249 | list_for_each_entry(ha, &gdth_instances, list) | 5216 | unregister_chrdev(major, "gdth"); |
5250 | gdth_remove_one(ha); | 5217 | unregister_reboot_notifier(&gdth_notifier); |
5251 | 5218 | ||
5252 | #ifdef GDTH_STATISTICS | 5219 | #ifdef GDTH_STATISTICS |
5253 | del_timer(&gdth_timer); | 5220 | del_timer_sync(&gdth_timer); |
5254 | #endif | 5221 | #endif |
5255 | unregister_chrdev(major,"gdth"); | 5222 | |
5256 | unregister_reboot_notifier(&gdth_notifier); | 5223 | list_for_each_entry(ha, &gdth_instances, list) |
5224 | gdth_remove_one(ha); | ||
5257 | } | 5225 | } |
5258 | 5226 | ||
5259 | module_init(gdth_init); | 5227 | module_init(gdth_init); |
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h index 1434c6b0297c..26e4e92515e0 100644 --- a/drivers/scsi/gdth.h +++ b/drivers/scsi/gdth.h | |||
@@ -915,6 +915,7 @@ typedef struct { | |||
915 | struct gdth_cmndinfo { /* per-command private info */ | 915 | struct gdth_cmndinfo { /* per-command private info */ |
916 | int index; | 916 | int index; |
917 | int internal_command; /* don't call scsi_done */ | 917 | int internal_command; /* don't call scsi_done */ |
918 | gdth_cmd_str *internal_cmd_str; /* carrier for internal messages */ | ||
918 | dma_addr_t sense_paddr; /* sense dma-addr */ | 919 | dma_addr_t sense_paddr; /* sense dma-addr */ |
919 | unchar priority; | 920 | unchar priority; |
920 | int timeout; | 921 | int timeout; |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index bd62131b97a1..e5881e92d0fb 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -290,7 +290,7 @@ static int ibmvstgt_cmd_done(struct scsi_cmnd *sc, | |||
290 | int err = 0; | 290 | int err = 0; |
291 | 291 | ||
292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], | 292 | dprintk("%p %p %x %u\n", iue, target, vio_iu(iue)->srp.cmd.cdb[0], |
293 | cmd->usg_sg); | 293 | scsi_sg_count(sc)); |
294 | 294 | ||
295 | if (scsi_sg_count(sc)) | 295 | if (scsi_sg_count(sc)) |
296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); | 296 | err = srp_transfer_data(sc, &vio_iu(iue)->srp.cmd, ibmvstgt_rdma, 1, 1); |
@@ -838,9 +838,6 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
838 | if (!shost) | 838 | if (!shost) |
839 | goto free_vport; | 839 | goto free_vport; |
840 | shost->transportt = ibmvstgt_transport_template; | 840 | shost->transportt = ibmvstgt_transport_template; |
841 | err = scsi_tgt_alloc_queue(shost); | ||
842 | if (err) | ||
843 | goto put_host; | ||
844 | 841 | ||
845 | target = host_to_srp_target(shost); | 842 | target = host_to_srp_target(shost); |
846 | target->shost = shost; | 843 | target->shost = shost; |
@@ -872,6 +869,10 @@ static int ibmvstgt_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
872 | if (err) | 869 | if (err) |
873 | goto destroy_queue; | 870 | goto destroy_queue; |
874 | 871 | ||
872 | err = scsi_tgt_alloc_queue(shost); | ||
873 | if (err) | ||
874 | goto destroy_queue; | ||
875 | |||
875 | return 0; | 876 | return 0; |
876 | destroy_queue: | 877 | destroy_queue: |
877 | crq_queue_destroy(target); | 878 | crq_queue_destroy(target); |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 59f8445eab0d..bdd7de7da39a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1708,8 +1708,8 @@ iscsi_session_setup(struct iscsi_transport *iscsit, | |||
1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; | 1708 | qdepth = ISCSI_DEF_CMD_PER_LUN; |
1709 | } | 1709 | } |
1710 | 1710 | ||
1711 | if (!is_power_of_2(cmds_max) || | 1711 | if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET || |
1712 | cmds_max >= ISCSI_MGMT_ITT_OFFSET) { | 1712 | cmds_max < 2) { |
1713 | if (cmds_max != 0) | 1713 | if (cmds_max != 0) |
1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " | 1714 | printk(KERN_ERR "iscsi: invalid can_queue of %d. " |
1715 | "can_queue must be a power of 2 and between " | 1715 | "can_queue must be a power of 2 and between " |
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 7cd05b599a12..b0e5ac372a32 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c | |||
@@ -236,12 +236,12 @@ static void sas_ata_phy_reset(struct ata_port *ap) | |||
236 | struct domain_device *dev = ap->private_data; | 236 | struct domain_device *dev = ap->private_data; |
237 | struct sas_internal *i = | 237 | struct sas_internal *i = |
238 | to_sas_internal(dev->port->ha->core.shost->transportt); | 238 | to_sas_internal(dev->port->ha->core.shost->transportt); |
239 | int res = 0; | 239 | int res = TMF_RESP_FUNC_FAILED; |
240 | 240 | ||
241 | if (i->dft->lldd_I_T_nexus_reset) | 241 | if (i->dft->lldd_I_T_nexus_reset) |
242 | res = i->dft->lldd_I_T_nexus_reset(dev); | 242 | res = i->dft->lldd_I_T_nexus_reset(dev); |
243 | 243 | ||
244 | if (res) | 244 | if (res != TMF_RESP_FUNC_COMPLETE) |
245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); | 245 | SAS_DPRINTK("%s: Unable to reset I T nexus?\n", __FUNCTION__); |
246 | 246 | ||
247 | switch (dev->sata_dev.command_set) { | 247 | switch (dev->sata_dev.command_set) { |
@@ -656,21 +656,6 @@ out: | |||
656 | return res; | 656 | return res; |
657 | } | 657 | } |
658 | 658 | ||
659 | static void sas_sata_propagate_sas_addr(struct domain_device *dev) | ||
660 | { | ||
661 | unsigned long flags; | ||
662 | struct asd_sas_port *port = dev->port; | ||
663 | struct asd_sas_phy *phy; | ||
664 | |||
665 | BUG_ON(dev->parent); | ||
666 | |||
667 | memcpy(port->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
668 | spin_lock_irqsave(&port->phy_list_lock, flags); | ||
669 | list_for_each_entry(phy, &port->phy_list, port_phy_el) | ||
670 | memcpy(phy->attached_sas_addr, dev->sas_addr, SAS_ADDR_SIZE); | ||
671 | spin_unlock_irqrestore(&port->phy_list_lock, flags); | ||
672 | } | ||
673 | |||
674 | #define ATA_IDENTIFY_DEV 0xEC | 659 | #define ATA_IDENTIFY_DEV 0xEC |
675 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 | 660 | #define ATA_IDENTIFY_PACKET_DEV 0xA1 |
676 | #define ATA_SET_FEATURES 0xEF | 661 | #define ATA_SET_FEATURES 0xEF |
@@ -728,26 +713,6 @@ static int sas_discover_sata_dev(struct domain_device *dev) | |||
728 | goto out_err; | 713 | goto out_err; |
729 | } | 714 | } |
730 | cont1: | 715 | cont1: |
731 | /* Get WWN */ | ||
732 | if (dev->port->oob_mode != SATA_OOB_MODE) { | ||
733 | memcpy(dev->sas_addr, dev->sata_dev.rps_resp.rps.stp_sas_addr, | ||
734 | SAS_ADDR_SIZE); | ||
735 | } else if (dev->sata_dev.command_set == ATA_COMMAND_SET && | ||
736 | (le16_to_cpu(dev->sata_dev.identify_device[108]) & 0xF000) | ||
737 | == 0x5000) { | ||
738 | int i; | ||
739 | |||
740 | for (i = 0; i < 4; i++) { | ||
741 | dev->sas_addr[2*i] = | ||
742 | (le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0xFF00) >> 8; | ||
743 | dev->sas_addr[2*i+1] = | ||
744 | le16_to_cpu(dev->sata_dev.identify_device[108+i]) & 0x00FF; | ||
745 | } | ||
746 | } | ||
747 | sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr); | ||
748 | if (!dev->parent) | ||
749 | sas_sata_propagate_sas_addr(dev); | ||
750 | |||
751 | /* XXX Hint: register this SATA device with SATL. | 716 | /* XXX Hint: register this SATA device with SATL. |
752 | When this returns, dev->sata_dev->lu is alive and | 717 | When this returns, dev->sata_dev->lu is alive and |
753 | present. | 718 | present. |
diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index e1e2d085c920..39ae68a3b0ef 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c | |||
@@ -92,9 +92,6 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
92 | if (!port->phy) | 92 | if (!port->phy) |
93 | port->phy = phy->phy; | 93 | port->phy = phy->phy; |
94 | 94 | ||
95 | SAS_DPRINTK("phy%d added to port%d, phy_mask:0x%x\n", phy->id, | ||
96 | port->id, port->phy_mask); | ||
97 | |||
98 | if (*(u64 *)port->attached_sas_addr == 0) { | 95 | if (*(u64 *)port->attached_sas_addr == 0) { |
99 | port->class = phy->class; | 96 | port->class = phy->class; |
100 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, | 97 | memcpy(port->attached_sas_addr, phy->attached_sas_addr, |
@@ -115,6 +112,11 @@ static void sas_form_port(struct asd_sas_phy *phy) | |||
115 | } | 112 | } |
116 | sas_port_add_phy(port->port, phy->phy); | 113 | sas_port_add_phy(port->port, phy->phy); |
117 | 114 | ||
115 | SAS_DPRINTK("%s added to %s, phy_mask:0x%x (%16llx)\n", | ||
116 | phy->phy->dev.bus_id, port->port->dev.bus_id, | ||
117 | port->phy_mask, | ||
118 | SAS_ADDR(port->attached_sas_addr)); | ||
119 | |||
118 | if (port->port_dev) | 120 | if (port->port_dev) |
119 | port->port_dev->pathways = port->num_phys; | 121 | port->port_dev->pathways = port->num_phys; |
120 | 122 | ||
@@ -255,12 +257,11 @@ void sas_porte_hard_reset(struct work_struct *work) | |||
255 | static void sas_init_port(struct asd_sas_port *port, | 257 | static void sas_init_port(struct asd_sas_port *port, |
256 | struct sas_ha_struct *sas_ha, int i) | 258 | struct sas_ha_struct *sas_ha, int i) |
257 | { | 259 | { |
260 | memset(port, 0, sizeof(*port)); | ||
258 | port->id = i; | 261 | port->id = i; |
259 | INIT_LIST_HEAD(&port->dev_list); | 262 | INIT_LIST_HEAD(&port->dev_list); |
260 | spin_lock_init(&port->phy_list_lock); | 263 | spin_lock_init(&port->phy_list_lock); |
261 | INIT_LIST_HEAD(&port->phy_list); | 264 | INIT_LIST_HEAD(&port->phy_list); |
262 | port->num_phys = 0; | ||
263 | port->phy_mask = 0; | ||
264 | port->ha = sas_ha; | 265 | port->ha = sas_ha; |
265 | 266 | ||
266 | spin_lock_init(&port->dev_list_lock); | 267 | spin_lock_init(&port->dev_list_lock); |
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index 704ea06a6e50..1f8241563c6c 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c | |||
@@ -434,7 +434,7 @@ static int sas_recover_I_T(struct domain_device *dev) | |||
434 | } | 434 | } |
435 | 435 | ||
436 | /* Find the sas_phy that's attached to this device */ | 436 | /* Find the sas_phy that's attached to this device */ |
437 | static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | 437 | struct sas_phy *sas_find_local_phy(struct domain_device *dev) |
438 | { | 438 | { |
439 | struct domain_device *pdev = dev->parent; | 439 | struct domain_device *pdev = dev->parent; |
440 | struct ex_phy *exphy = NULL; | 440 | struct ex_phy *exphy = NULL; |
@@ -456,6 +456,7 @@ static struct sas_phy *find_local_sas_phy(struct domain_device *dev) | |||
456 | BUG_ON(!exphy); | 456 | BUG_ON(!exphy); |
457 | return exphy->phy; | 457 | return exphy->phy; |
458 | } | 458 | } |
459 | EXPORT_SYMBOL_GPL(sas_find_local_phy); | ||
459 | 460 | ||
460 | /* Attempt to send a LUN reset message to a device */ | 461 | /* Attempt to send a LUN reset message to a device */ |
461 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | 462 | int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) |
@@ -482,7 +483,7 @@ int sas_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
482 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | 483 | int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) |
483 | { | 484 | { |
484 | struct domain_device *dev = cmd_to_domain_dev(cmd); | 485 | struct domain_device *dev = cmd_to_domain_dev(cmd); |
485 | struct sas_phy *phy = find_local_sas_phy(dev); | 486 | struct sas_phy *phy = sas_find_local_phy(dev); |
486 | int res; | 487 | int res; |
487 | 488 | ||
488 | res = sas_phy_reset(phy, 1); | 489 | res = sas_phy_reset(phy, 1); |
@@ -497,10 +498,10 @@ int sas_eh_bus_reset_handler(struct scsi_cmnd *cmd) | |||
497 | } | 498 | } |
498 | 499 | ||
499 | /* Try to reset a device */ | 500 | /* Try to reset a device */ |
500 | static int try_to_reset_cmd_device(struct Scsi_Host *shost, | 501 | static int try_to_reset_cmd_device(struct scsi_cmnd *cmd) |
501 | struct scsi_cmnd *cmd) | ||
502 | { | 502 | { |
503 | int res; | 503 | int res; |
504 | struct Scsi_Host *shost = cmd->device->host; | ||
504 | 505 | ||
505 | if (!shost->hostt->eh_device_reset_handler) | 506 | if (!shost->hostt->eh_device_reset_handler) |
506 | goto try_bus_reset; | 507 | goto try_bus_reset; |
@@ -540,6 +541,12 @@ Again: | |||
540 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; | 541 | need_reset = task->task_state_flags & SAS_TASK_NEED_DEV_RESET; |
541 | spin_unlock_irqrestore(&task->task_state_lock, flags); | 542 | spin_unlock_irqrestore(&task->task_state_lock, flags); |
542 | 543 | ||
544 | if (need_reset) { | ||
545 | SAS_DPRINTK("%s: task 0x%p requests reset\n", | ||
546 | __FUNCTION__, task); | ||
547 | goto reset; | ||
548 | } | ||
549 | |||
543 | SAS_DPRINTK("trying to find task 0x%p\n", task); | 550 | SAS_DPRINTK("trying to find task 0x%p\n", task); |
544 | res = sas_scsi_find_task(task); | 551 | res = sas_scsi_find_task(task); |
545 | 552 | ||
@@ -550,18 +557,15 @@ Again: | |||
550 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, | 557 | SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__, |
551 | task); | 558 | task); |
552 | sas_eh_finish_cmd(cmd); | 559 | sas_eh_finish_cmd(cmd); |
553 | if (need_reset) | ||
554 | try_to_reset_cmd_device(shost, cmd); | ||
555 | continue; | 560 | continue; |
556 | case TASK_IS_ABORTED: | 561 | case TASK_IS_ABORTED: |
557 | SAS_DPRINTK("%s: task 0x%p is aborted\n", | 562 | SAS_DPRINTK("%s: task 0x%p is aborted\n", |
558 | __FUNCTION__, task); | 563 | __FUNCTION__, task); |
559 | sas_eh_finish_cmd(cmd); | 564 | sas_eh_finish_cmd(cmd); |
560 | if (need_reset) | ||
561 | try_to_reset_cmd_device(shost, cmd); | ||
562 | continue; | 565 | continue; |
563 | case TASK_IS_AT_LU: | 566 | case TASK_IS_AT_LU: |
564 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); | 567 | SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task); |
568 | reset: | ||
565 | tmf_resp = sas_recover_lu(task->dev, cmd); | 569 | tmf_resp = sas_recover_lu(task->dev, cmd); |
566 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 570 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
567 | SAS_DPRINTK("dev %016llx LU %x is " | 571 | SAS_DPRINTK("dev %016llx LU %x is " |
@@ -569,8 +573,6 @@ Again: | |||
569 | SAS_ADDR(task->dev), | 573 | SAS_ADDR(task->dev), |
570 | cmd->device->lun); | 574 | cmd->device->lun); |
571 | sas_eh_finish_cmd(cmd); | 575 | sas_eh_finish_cmd(cmd); |
572 | if (need_reset) | ||
573 | try_to_reset_cmd_device(shost, cmd); | ||
574 | sas_scsi_clear_queue_lu(work_q, cmd); | 576 | sas_scsi_clear_queue_lu(work_q, cmd); |
575 | goto Again; | 577 | goto Again; |
576 | } | 578 | } |
@@ -581,15 +583,15 @@ Again: | |||
581 | task); | 583 | task); |
582 | tmf_resp = sas_recover_I_T(task->dev); | 584 | tmf_resp = sas_recover_I_T(task->dev); |
583 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { | 585 | if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { |
586 | struct domain_device *dev = task->dev; | ||
584 | SAS_DPRINTK("I_T %016llx recovered\n", | 587 | SAS_DPRINTK("I_T %016llx recovered\n", |
585 | SAS_ADDR(task->dev->sas_addr)); | 588 | SAS_ADDR(task->dev->sas_addr)); |
586 | sas_eh_finish_cmd(cmd); | 589 | sas_eh_finish_cmd(cmd); |
587 | if (need_reset) | 590 | sas_scsi_clear_queue_I_T(work_q, dev); |
588 | try_to_reset_cmd_device(shost, cmd); | ||
589 | sas_scsi_clear_queue_I_T(work_q, task->dev); | ||
590 | goto Again; | 591 | goto Again; |
591 | } | 592 | } |
592 | /* Hammer time :-) */ | 593 | /* Hammer time :-) */ |
594 | try_to_reset_cmd_device(cmd); | ||
593 | if (i->dft->lldd_clear_nexus_port) { | 595 | if (i->dft->lldd_clear_nexus_port) { |
594 | struct asd_sas_port *port = task->dev->port; | 596 | struct asd_sas_port *port = task->dev->port; |
595 | SAS_DPRINTK("clearing nexus for port:%d\n", | 597 | SAS_DPRINTK("clearing nexus for port:%d\n", |
@@ -599,8 +601,6 @@ Again: | |||
599 | SAS_DPRINTK("clear nexus port:%d " | 601 | SAS_DPRINTK("clear nexus port:%d " |
600 | "succeeded\n", port->id); | 602 | "succeeded\n", port->id); |
601 | sas_eh_finish_cmd(cmd); | 603 | sas_eh_finish_cmd(cmd); |
602 | if (need_reset) | ||
603 | try_to_reset_cmd_device(shost, cmd); | ||
604 | sas_scsi_clear_queue_port(work_q, | 604 | sas_scsi_clear_queue_port(work_q, |
605 | port); | 605 | port); |
606 | goto Again; | 606 | goto Again; |
@@ -613,8 +613,6 @@ Again: | |||
613 | SAS_DPRINTK("clear nexus ha " | 613 | SAS_DPRINTK("clear nexus ha " |
614 | "succeeded\n"); | 614 | "succeeded\n"); |
615 | sas_eh_finish_cmd(cmd); | 615 | sas_eh_finish_cmd(cmd); |
616 | if (need_reset) | ||
617 | try_to_reset_cmd_device(shost, cmd); | ||
618 | goto clear_q; | 616 | goto clear_q; |
619 | } | 617 | } |
620 | } | 618 | } |
@@ -628,8 +626,6 @@ Again: | |||
628 | cmd->device->lun); | 626 | cmd->device->lun); |
629 | 627 | ||
630 | sas_eh_finish_cmd(cmd); | 628 | sas_eh_finish_cmd(cmd); |
631 | if (need_reset) | ||
632 | try_to_reset_cmd_device(shost, cmd); | ||
633 | goto clear_q; | 629 | goto clear_q; |
634 | } | 630 | } |
635 | } | 631 | } |
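In the error-handler hunks above, a task flagged SAS_TASK_NEED_DEV_RESET now jumps straight to the LU-recovery path via the new reset: label, and the scattered per-case try_to_reset_cmd_device() calls collapse into a single call on the escalation ("Hammer time") path. The helper also loses its Scsi_Host parameter because the host is reachable from the command itself; a rough sketch of that derivation, not the libsas function:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

static int demo_reset_cmd_device(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *shost = cmd->device->host;    /* derived, not passed in */

        if (shost->hostt->eh_device_reset_handler)
                return shost->hostt->eh_device_reset_handler(cmd);
        if (shost->hostt->eh_bus_reset_handler)
                return shost->hostt->eh_bus_reset_handler(cmd);
        return FAILED;
}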
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c index d4a6ac3c9c47..5ec0665b3a3d 100644 --- a/drivers/scsi/mvsas.c +++ b/drivers/scsi/mvsas.c | |||
@@ -40,7 +40,7 @@ | |||
40 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | 41 | ||
42 | #define DRV_NAME "mvsas" | 42 | #define DRV_NAME "mvsas" |
43 | #define DRV_VERSION "0.5" | 43 | #define DRV_VERSION "0.5.1" |
44 | #define _MV_DUMP 0 | 44 | #define _MV_DUMP 0 |
45 | #define MVS_DISABLE_NVRAM | 45 | #define MVS_DISABLE_NVRAM |
46 | #define MVS_DISABLE_MSI | 46 | #define MVS_DISABLE_MSI |
@@ -1005,7 +1005,7 @@ err_out: | |||
1005 | return rc; | 1005 | return rc; |
1006 | #else | 1006 | #else |
1007 | /* FIXME , For SAS target mode */ | 1007 | /* FIXME , For SAS target mode */ |
1008 | memcpy(buf, "\x00\x00\xab\x11\x30\x04\x05\x50", 8); | 1008 | memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8); |
1009 | return 0; | 1009 | return 0; |
1010 | #endif | 1010 | #endif |
1011 | } | 1011 | } |
@@ -1330,7 +1330,7 @@ static int mvs_int_rx(struct mvs_info *mvi, bool self_clear) | |||
1330 | 1330 | ||
1331 | mvs_hba_cq_dump(mvi); | 1331 | mvs_hba_cq_dump(mvi); |
1332 | 1332 | ||
1333 | if (unlikely(rx_desc & RXQ_DONE)) | 1333 | if (likely(rx_desc & RXQ_DONE)) |
1334 | mvs_slot_complete(mvi, rx_desc); | 1334 | mvs_slot_complete(mvi, rx_desc); |
1335 | if (rx_desc & RXQ_ATTN) { | 1335 | if (rx_desc & RXQ_ATTN) { |
1336 | attn = true; | 1336 | attn = true; |
@@ -2720,9 +2720,8 @@ static int __devinit mvs_hw_init(struct mvs_info *mvi) | |||
2720 | msleep(100); | 2720 | msleep(100); |
2721 | /* init and reset phys */ | 2721 | /* init and reset phys */ |
2722 | for (i = 0; i < mvi->chip->n_phy; i++) { | 2722 | for (i = 0; i < mvi->chip->n_phy; i++) { |
2723 | /* FIXME: is this the correct dword order? */ | 2723 | u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]); |
2724 | u32 lo = *((u32 *)&mvi->sas_addr[0]); | 2724 | u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]); |
2725 | u32 hi = *((u32 *)&mvi->sas_addr[4]); | ||
2726 | 2725 | ||
2727 | mvs_detect_porttype(mvi, i); | 2726 | mvs_detect_porttype(mvi, i); |
2728 | 2727 | ||
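The mvs_hw_init() hunk above answers the old "is this the correct dword order?" FIXME: the SAS address sits big-endian in memory, so the high dword comes from bytes 0-3 and the low dword from bytes 4-7, each converted with be32_to_cpu(). A small user-space check of that arithmetic, with ntohl() standing in for be32_to_cpu() and the placeholder address from the SAS-target FIXME path earlier in the file:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 50:05:04:30:11:ab:00:00 stored big-endian, as in mvi->sas_addr */
        uint8_t sas_addr[8] = { 0x50, 0x05, 0x04, 0x30, 0x11, 0xab, 0x00, 0x00 };
        uint32_t hi, lo;

        memcpy(&hi, &sas_addr[0], sizeof(hi));
        memcpy(&lo, &sas_addr[4], sizeof(lo));
        hi = ntohl(hi);                         /* 0x50050430 */
        lo = ntohl(lo);                         /* 0x11ab0000 */
        printf("hi=0x%08x lo=0x%08x\n", hi, lo);
        return 0;
}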
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c index 0cd614a0fa73..fad6cb5cba28 100644 --- a/drivers/scsi/ps3rom.c +++ b/drivers/scsi/ps3rom.c | |||
@@ -124,7 +124,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *cmd, const void *buf) | |||
124 | } | 124 | } |
125 | req_len += sgpnt->length; | 125 | req_len += sgpnt->length; |
126 | } | 126 | } |
127 | scsi_set_resid(cmd, req_len - act_len); | 127 | scsi_set_resid(cmd, buflen - act_len); |
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
@@ -427,7 +427,7 @@ static struct scsi_host_template ps3rom_host_template = { | |||
427 | .cmd_per_lun = 1, | 427 | .cmd_per_lun = 1, |
428 | .emulated = 1, /* only sg driver uses this */ | 428 | .emulated = 1, /* only sg driver uses this */ |
429 | .max_sectors = PS3ROM_MAX_SECTORS, | 429 | .max_sectors = PS3ROM_MAX_SECTORS, |
430 | .use_clustering = ENABLE_CLUSTERING, | 430 | .use_clustering = DISABLE_CLUSTERING, |
431 | .module = THIS_MODULE, | 431 | .module = THIS_MODULE, |
432 | }; | 432 | }; |
433 | 433 | ||
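In fill_from_dev_buffer() above, the residual is now measured against the full requested buffer length rather than the running total of the scatterlist segment lengths that happened to be walked. Reduced to its essence (a sketch, not the driver code):

/* residual = bytes the midlayer asked for minus bytes actually copied */
static inline unsigned int demo_resid(unsigned int buflen, unsigned int act_len)
{
        return buflen - act_len;
}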
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 6226d88479f5..c1808763d40e 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c | |||
@@ -39,7 +39,7 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
39 | ms_pkt->entry_count = 1; | 39 | ms_pkt->entry_count = 1; |
40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); | 40 | SET_TARGET_ID(ha, ms_pkt->loop_id, SIMPLE_NAME_SERVER); |
41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 41 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
42 | ms_pkt->timeout = __constant_cpu_to_le16(25); | 42 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 43 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 44 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 45 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -75,7 +75,7 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *ha, uint32_t req_size, uint32_t rsp_size) | |||
75 | ct_pkt->entry_type = CT_IOCB_TYPE; | 75 | ct_pkt->entry_type = CT_IOCB_TYPE; |
76 | ct_pkt->entry_count = 1; | 76 | ct_pkt->entry_count = 1; |
77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); | 77 | ct_pkt->nport_handle = __constant_cpu_to_le16(NPH_SNS); |
78 | ct_pkt->timeout = __constant_cpu_to_le16(25); | 78 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 79 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 80 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 81 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1144,7 +1144,7 @@ qla2x00_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1144 | ms_pkt->entry_count = 1; | 1144 | ms_pkt->entry_count = 1; |
1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); | 1145 | SET_TARGET_ID(ha, ms_pkt->loop_id, ha->mgmt_svr_loop_id); |
1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); | 1146 | ms_pkt->control_flags = __constant_cpu_to_le16(CF_READ | CF_HEAD_TAG); |
1147 | ms_pkt->timeout = __constant_cpu_to_le16(59); | 1147 | ms_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1148 | ms_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); | 1149 | ms_pkt->total_dsd_count = __constant_cpu_to_le16(2); |
1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); | 1150 | ms_pkt->rsp_bytecount = cpu_to_le32(rsp_size); |
@@ -1181,7 +1181,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1181 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1181 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1182 | ct_pkt->entry_count = 1; | 1182 | ct_pkt->entry_count = 1; |
1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1183 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1184 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1184 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1185 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1186 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1187 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
@@ -1761,7 +1761,7 @@ qla24xx_prep_ms_fm_iocb(scsi_qla_host_t *ha, uint32_t req_size, | |||
1761 | ct_pkt->entry_type = CT_IOCB_TYPE; | 1761 | ct_pkt->entry_type = CT_IOCB_TYPE; |
1762 | ct_pkt->entry_count = 1; | 1762 | ct_pkt->entry_count = 1; |
1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); | 1763 | ct_pkt->nport_handle = cpu_to_le16(ha->mgmt_svr_loop_id); |
1764 | ct_pkt->timeout = __constant_cpu_to_le16(59); | 1764 | ct_pkt->timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); | 1765 | ct_pkt->cmd_dsd_count = __constant_cpu_to_le16(1); |
1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); | 1766 | ct_pkt->rsp_dsd_count = __constant_cpu_to_le16(1); |
1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); | 1767 | ct_pkt->rsp_byte_count = cpu_to_le32(rsp_size); |
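All four CT/MS packet timeouts above are now derived from the adapter's R_A_TOV instead of the fixed 25s/59s values. ha->r_a_tov is kept in tenths of a second (the qla_init.c hunk below sets a 100-tenth minimum), so dividing by 10 gives seconds and doubling gives 2 * R_A_TOV. A quick stand-alone check of the arithmetic:

#include <stdio.h>

static unsigned short ct_timeout_secs(unsigned short r_a_tov_tenths)
{
        return r_a_tov_tenths / 10 * 2;         /* 2 * R_A_TOV, in seconds */
}

int main(void)
{
        printf("%u\n", ct_timeout_secs(100));   /* minimum RATOV -> 20 seconds */
        return 0;
}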
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index d5c7853e7eba..364be7d06875 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1733,8 +1733,8 @@ qla2x00_nvram_config(scsi_qla_host_t *ha) | |||
1733 | ha->login_timeout = nv->login_timeout; | 1733 | ha->login_timeout = nv->login_timeout; |
1734 | icb->login_timeout = nv->login_timeout; | 1734 | icb->login_timeout = nv->login_timeout; |
1735 | 1735 | ||
1736 | /* Set minimum RATOV to 200 tenths of a second. */ | 1736 | /* Set minimum RATOV to 100 tenths of a second. */ |
1737 | ha->r_a_tov = 200; | 1737 | ha->r_a_tov = 100; |
1738 | 1738 | ||
1739 | ha->loop_reset_delay = nv->reset_delay; | 1739 | ha->loop_reset_delay = nv->reset_delay; |
1740 | 1740 | ||
@@ -3645,8 +3645,8 @@ qla24xx_nvram_config(scsi_qla_host_t *ha) | |||
3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); | 3645 | ha->login_timeout = le16_to_cpu(nv->login_timeout); |
3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); | 3646 | icb->login_timeout = cpu_to_le16(nv->login_timeout); |
3647 | 3647 | ||
3648 | /* Set minimum RATOV to 200 tenths of a second. */ | 3648 | /* Set minimum RATOV to 100 tenths of a second. */ |
3649 | ha->r_a_tov = 200; | 3649 | ha->r_a_tov = 100; |
3650 | 3650 | ||
3651 | ha->loop_reset_delay = nv->reset_delay; | 3651 | ha->loop_reset_delay = nv->reset_delay; |
3652 | 3652 | ||
@@ -4022,7 +4022,8 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *ha) | |||
4022 | return; | 4022 | return; |
4023 | 4023 | ||
4024 | ret = qla2x00_stop_firmware(ha); | 4024 | ret = qla2x00_stop_firmware(ha); |
4025 | for (retries = 5; ret != QLA_SUCCESS && retries ; retries--) { | 4025 | for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT && |
4026 | retries ; retries--) { | ||
4026 | qla2x00_reset_chip(ha); | 4027 | qla2x00_reset_chip(ha); |
4027 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) | 4028 | if (qla2x00_chip_diag(ha) != QLA_SUCCESS) |
4028 | continue; | 4029 | continue; |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 14e6f22944b7..f0337036c7bb 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -958,6 +958,11 @@ qla2x00_status_entry(scsi_qla_host_t *ha, void *pkt) | |||
958 | } | 958 | } |
959 | } | 959 | } |
960 | 960 | ||
961 | /* Check for overrun. */ | ||
962 | if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE && | ||
963 | scsi_status & SS_RESIDUAL_OVER) | ||
964 | comp_status = CS_DATA_OVERRUN; | ||
965 | |||
961 | /* | 966 | /* |
962 | * Based on Host and scsi status generate status code for Linux | 967 | * Based on Host and scsi status generate status code for Linux |
963 | */ | 968 | */ |
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 99d29fff836d..bb103580e1ba 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c | |||
@@ -2206,7 +2206,7 @@ qla24xx_abort_target(fc_port_t *fcport) | |||
2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; | 2206 | tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE; |
2207 | tsk->p.tsk.entry_count = 1; | 2207 | tsk->p.tsk.entry_count = 1; |
2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); | 2208 | tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id); |
2209 | tsk->p.tsk.timeout = __constant_cpu_to_le16(25); | 2209 | tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2); |
2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); | 2210 | tsk->p.tsk.control_flags = __constant_cpu_to_le32(TCF_TARGET_RESET); |
2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; | 2211 | tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; |
2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; | 2212 | tsk->p.tsk.port_id[1] = fcport->d_id.b.area; |
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index c5742cc15abb..ea08a129fee9 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h | |||
@@ -7,7 +7,7 @@ | |||
7 | /* | 7 | /* |
8 | * Driver version | 8 | * Driver version |
9 | */ | 9 | */ |
10 | #define QLA2XXX_VERSION "8.02.00-k8" | 10 | #define QLA2XXX_VERSION "8.02.00-k9" |
11 | 11 | ||
12 | #define QLA_DRIVER_MAJOR_VER 8 | 12 | #define QLA_DRIVER_MAJOR_VER 8 |
13 | #define QLA_DRIVER_MINOR_VER 2 | 13 | #define QLA_DRIVER_MINOR_VER 2 |
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c index 10b3b9a620f3..109c5f5985ec 100644 --- a/drivers/scsi/qla4xxx/ql4_init.c +++ b/drivers/scsi/qla4xxx/ql4_init.c | |||
@@ -1299,9 +1299,9 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, | |||
1299 | ddb_entry->fw_ddb_device_state = state; | 1299 | ddb_entry->fw_ddb_device_state = state; |
1300 | /* Device is back online. */ | 1300 | /* Device is back online. */ |
1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { | 1301 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { |
1302 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1302 | atomic_set(&ddb_entry->port_down_timer, | 1303 | atomic_set(&ddb_entry->port_down_timer, |
1303 | ha->port_down_retry_count); | 1304 | ha->port_down_retry_count); |
1304 | atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); | ||
1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); | 1305 | atomic_set(&ddb_entry->relogin_retry_count, 0); |
1306 | atomic_set(&ddb_entry->relogin_timer, 0); | 1306 | atomic_set(&ddb_entry->relogin_timer, 0); |
1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); | 1307 | clear_bit(DF_RELOGIN, &ddb_entry->flags); |
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index c3c59d763037..8b92f348f02c 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c | |||
@@ -75,6 +75,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); | |||
75 | static int qla4xxx_slave_alloc(struct scsi_device *device); | 75 | static int qla4xxx_slave_alloc(struct scsi_device *device); |
76 | static int qla4xxx_slave_configure(struct scsi_device *device); | 76 | static int qla4xxx_slave_configure(struct scsi_device *device); |
77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); | 77 | static void qla4xxx_slave_destroy(struct scsi_device *sdev); |
78 | static void qla4xxx_scan_start(struct Scsi_Host *shost); | ||
78 | 79 | ||
79 | static struct scsi_host_template qla4xxx_driver_template = { | 80 | static struct scsi_host_template qla4xxx_driver_template = { |
80 | .module = THIS_MODULE, | 81 | .module = THIS_MODULE, |
@@ -90,6 +91,7 @@ static struct scsi_host_template qla4xxx_driver_template = { | |||
90 | .slave_destroy = qla4xxx_slave_destroy, | 91 | .slave_destroy = qla4xxx_slave_destroy, |
91 | 92 | ||
92 | .scan_finished = iscsi_scan_finished, | 93 | .scan_finished = iscsi_scan_finished, |
94 | .scan_start = qla4xxx_scan_start, | ||
93 | 95 | ||
94 | .this_id = -1, | 96 | .this_id = -1, |
95 | .cmd_per_lun = 3, | 97 | .cmd_per_lun = 3, |
@@ -299,6 +301,18 @@ struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha) | |||
299 | return ddb_entry; | 301 | return ddb_entry; |
300 | } | 302 | } |
301 | 303 | ||
304 | static void qla4xxx_scan_start(struct Scsi_Host *shost) | ||
305 | { | ||
306 | struct scsi_qla_host *ha = shost_priv(shost); | ||
307 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
308 | |||
309 | /* finish setup of sessions that were already set up in firmware */ | ||
310 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
311 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
312 | qla4xxx_add_sess(ddb_entry); | ||
313 | } | ||
314 | } | ||
315 | |||
302 | /* | 316 | /* |
303 | * Timer routines | 317 | * Timer routines |
304 | */ | 318 | */ |
@@ -864,8 +878,9 @@ static void qla4xxx_flush_active_srbs(struct scsi_qla_host *ha) | |||
864 | * qla4xxx_recover_adapter - recovers adapter after a fatal error | 878 | * qla4xxx_recover_adapter - recovers adapter after a fatal error |
865 | * @ha: Pointer to host adapter structure. | 879 | * @ha: Pointer to host adapter structure. |
866 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list | 880 | * @renew_ddb_list: Indicates what to do with the adapter's ddb list |
867 | * after adapter recovery has completed. | 881 | * |
868 | * 0=preserve ddb list, 1=destroy and rebuild ddb list | 882 | * renew_ddb_list value can be 0=preserve ddb list, 1=destroy and rebuild |
883 | * ddb list. | ||
869 | **/ | 884 | **/ |
870 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | 885 | static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, |
871 | uint8_t renew_ddb_list) | 886 | uint8_t renew_ddb_list) |
@@ -874,6 +889,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha, | |||
874 | 889 | ||
875 | /* Stall incoming I/O until we are done */ | 890 | /* Stall incoming I/O until we are done */ |
876 | clear_bit(AF_ONLINE, &ha->flags); | 891 | clear_bit(AF_ONLINE, &ha->flags); |
892 | |||
877 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, | 893 | DEBUG2(printk("scsi%ld: %s calling qla4xxx_cmd_wait\n", ha->host_no, |
878 | __func__)); | 894 | __func__)); |
879 | 895 | ||
@@ -1176,7 +1192,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1176 | int ret = -ENODEV, status; | 1192 | int ret = -ENODEV, status; |
1177 | struct Scsi_Host *host; | 1193 | struct Scsi_Host *host; |
1178 | struct scsi_qla_host *ha; | 1194 | struct scsi_qla_host *ha; |
1179 | struct ddb_entry *ddb_entry, *ddbtemp; | ||
1180 | uint8_t init_retry_count = 0; | 1195 | uint8_t init_retry_count = 0; |
1181 | char buf[34]; | 1196 | char buf[34]; |
1182 | 1197 | ||
@@ -1295,13 +1310,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1295 | if (ret) | 1310 | if (ret) |
1296 | goto probe_failed; | 1311 | goto probe_failed; |
1297 | 1312 | ||
1298 | /* Update transport device information for all devices. */ | ||
1299 | list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { | ||
1300 | if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) | ||
1301 | if (qla4xxx_add_sess(ddb_entry)) | ||
1302 | goto remove_host; | ||
1303 | } | ||
1304 | |||
1305 | printk(KERN_INFO | 1313 | printk(KERN_INFO |
1306 | " QLogic iSCSI HBA Driver version: %s\n" | 1314 | " QLogic iSCSI HBA Driver version: %s\n" |
1307 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", | 1315 | " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", |
@@ -1311,10 +1319,6 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, | |||
1311 | scsi_scan_host(host); | 1319 | scsi_scan_host(host); |
1312 | return 0; | 1320 | return 0; |
1313 | 1321 | ||
1314 | remove_host: | ||
1315 | qla4xxx_free_ddb_list(ha); | ||
1316 | scsi_remove_host(host); | ||
1317 | |||
1318 | probe_failed: | 1322 | probe_failed: |
1319 | qla4xxx_free_adapter(ha); | 1323 | qla4xxx_free_adapter(ha); |
1320 | scsi_host_put(ha->host); | 1324 | scsi_host_put(ha->host); |
@@ -1600,9 +1604,12 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) | |||
1600 | return FAILED; | 1604 | return FAILED; |
1601 | } | 1605 | } |
1602 | 1606 | ||
1603 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) { | 1607 | /* make sure the dpc thread is stopped while we reset the hba */ |
1608 | clear_bit(AF_ONLINE, &ha->flags); | ||
1609 | flush_workqueue(ha->dpc_thread); | ||
1610 | |||
1611 | if (qla4xxx_recover_adapter(ha, PRESERVE_DDB_LIST) == QLA_SUCCESS) | ||
1604 | return_status = SUCCESS; | 1612 | return_status = SUCCESS; |
1605 | } | ||
1606 | 1613 | ||
1607 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", | 1614 | dev_info(&ha->pdev->dev, "HOST RESET %s.\n", |
1608 | return_status == FAILED ? "FAILED" : "SUCCEEDED"); | 1615 |
diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c index 3677fbb30b72..a0f308bd145b 100644 --- a/drivers/scsi/scsi_tgt_lib.c +++ b/drivers/scsi/scsi_tgt_lib.c | |||
@@ -103,7 +103,6 @@ struct scsi_cmnd *scsi_host_get_command(struct Scsi_Host *shost, | |||
103 | if (!cmd) | 103 | if (!cmd) |
104 | goto release_rq; | 104 | goto release_rq; |
105 | 105 | ||
106 | memset(cmd, 0, sizeof(*cmd)); | ||
107 | cmd->sc_data_direction = data_dir; | 106 | cmd->sc_data_direction = data_dir; |
108 | cmd->jiffies_at_alloc = jiffies; | 107 | cmd->jiffies_at_alloc = jiffies; |
109 | cmd->request = rq; | 108 | cmd->request = rq; |
@@ -382,6 +381,11 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd, | |||
382 | scsi_release_buffers(cmd); | 381 | scsi_release_buffers(cmd); |
383 | goto unmap_rq; | 382 | goto unmap_rq; |
384 | } | 383 | } |
384 | /* | ||
385 | * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the | ||
386 | * length for us. | ||
387 | */ | ||
388 | cmd->sdb.length = rq->data_len; | ||
385 | 389 | ||
386 | return 0; | 390 | return 0; |
387 | 391 | ||
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 9981682d5302..ca7bb6f63bde 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #define ISCSI_SESSION_ATTRS 19 | 33 | #define ISCSI_SESSION_ATTRS 19 |
34 | #define ISCSI_CONN_ATTRS 13 | 34 | #define ISCSI_CONN_ATTRS 13 |
35 | #define ISCSI_HOST_ATTRS 4 | 35 | #define ISCSI_HOST_ATTRS 4 |
36 | #define ISCSI_TRANSPORT_VERSION "2.0-868" | 36 | #define ISCSI_TRANSPORT_VERSION "2.0-869" |
37 | 37 | ||
38 | struct iscsi_internal { | 38 | struct iscsi_internal { |
39 | int daemon_pid; | 39 | int daemon_pid; |
@@ -373,24 +373,25 @@ static void session_recovery_timedout(struct work_struct *work) | |||
373 | scsi_target_unblock(&session->dev); | 373 | scsi_target_unblock(&session->dev); |
374 | } | 374 | } |
375 | 375 | ||
376 | static void __iscsi_unblock_session(struct iscsi_cls_session *session) | 376 | static void __iscsi_unblock_session(struct work_struct *work) |
377 | { | ||
378 | if (!cancel_delayed_work(&session->recovery_work)) | ||
379 | flush_workqueue(iscsi_eh_timer_workq); | ||
380 | scsi_target_unblock(&session->dev); | ||
381 | } | ||
382 | |||
383 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
384 | { | 377 | { |
378 | struct iscsi_cls_session *session = | ||
379 | container_of(work, struct iscsi_cls_session, | ||
380 | unblock_work); | ||
385 | struct Scsi_Host *shost = iscsi_session_to_shost(session); | 381 | struct Scsi_Host *shost = iscsi_session_to_shost(session); |
386 | struct iscsi_host *ihost = shost->shost_data; | 382 | struct iscsi_host *ihost = shost->shost_data; |
387 | unsigned long flags; | 383 | unsigned long flags; |
388 | 384 | ||
385 | /* | ||
386 | * The recovery and unblock work get run from the same workqueue, | ||
387 | * so try to cancel it if it was going to run after this unblock. | ||
388 | */ | ||
389 | cancel_delayed_work(&session->recovery_work); | ||
389 | spin_lock_irqsave(&session->lock, flags); | 390 | spin_lock_irqsave(&session->lock, flags); |
390 | session->state = ISCSI_SESSION_LOGGED_IN; | 391 | session->state = ISCSI_SESSION_LOGGED_IN; |
391 | spin_unlock_irqrestore(&session->lock, flags); | 392 | spin_unlock_irqrestore(&session->lock, flags); |
392 | 393 | /* start IO */ | |
393 | __iscsi_unblock_session(session); | 394 | scsi_target_unblock(&session->dev); |
394 | /* | 395 | /* |
395 | * Only do kernel scanning if the driver is properly hooked into | 396 | * Only do kernel scanning if the driver is properly hooked into |
396 | * the async scanning code (drivers like iscsi_tcp do login and | 397 | * the async scanning code (drivers like iscsi_tcp do login and |
@@ -401,20 +402,43 @@ void iscsi_unblock_session(struct iscsi_cls_session *session) | |||
401 | atomic_inc(&ihost->nr_scans); | 402 | atomic_inc(&ihost->nr_scans); |
402 | } | 403 | } |
403 | } | 404 | } |
405 | |||
406 | /** | ||
407 | * iscsi_unblock_session - set a session as logged in and start IO. | ||
408 | * @session: iscsi session | ||
409 | * | ||
410 | * Mark a session as ready to accept IO. | ||
411 | */ | ||
412 | void iscsi_unblock_session(struct iscsi_cls_session *session) | ||
413 | { | ||
414 | queue_work(iscsi_eh_timer_workq, &session->unblock_work); | ||
415 | /* | ||
416 | * make sure all the events have completed before telling the driver | ||
417 | * it is safe | ||
418 | */ | ||
419 | flush_workqueue(iscsi_eh_timer_workq); | ||
420 | } | ||
404 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); | 421 | EXPORT_SYMBOL_GPL(iscsi_unblock_session); |
405 | 422 | ||
406 | void iscsi_block_session(struct iscsi_cls_session *session) | 423 | static void __iscsi_block_session(struct work_struct *work) |
407 | { | 424 | { |
425 | struct iscsi_cls_session *session = | ||
426 | container_of(work, struct iscsi_cls_session, | ||
427 | block_work); | ||
408 | unsigned long flags; | 428 | unsigned long flags; |
409 | 429 | ||
410 | spin_lock_irqsave(&session->lock, flags); | 430 | spin_lock_irqsave(&session->lock, flags); |
411 | session->state = ISCSI_SESSION_FAILED; | 431 | session->state = ISCSI_SESSION_FAILED; |
412 | spin_unlock_irqrestore(&session->lock, flags); | 432 | spin_unlock_irqrestore(&session->lock, flags); |
413 | |||
414 | scsi_target_block(&session->dev); | 433 | scsi_target_block(&session->dev); |
415 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, | 434 | queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work, |
416 | session->recovery_tmo * HZ); | 435 | session->recovery_tmo * HZ); |
417 | } | 436 | } |
437 | |||
438 | void iscsi_block_session(struct iscsi_cls_session *session) | ||
439 | { | ||
440 | queue_work(iscsi_eh_timer_workq, &session->block_work); | ||
441 | } | ||
418 | EXPORT_SYMBOL_GPL(iscsi_block_session); | 442 | EXPORT_SYMBOL_GPL(iscsi_block_session); |
419 | 443 | ||
420 | static void __iscsi_unbind_session(struct work_struct *work) | 444 | static void __iscsi_unbind_session(struct work_struct *work) |
@@ -463,6 +487,8 @@ iscsi_alloc_session(struct Scsi_Host *shost, | |||
463 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); | 487 | INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout); |
464 | INIT_LIST_HEAD(&session->host_list); | 488 | INIT_LIST_HEAD(&session->host_list); |
465 | INIT_LIST_HEAD(&session->sess_list); | 489 | INIT_LIST_HEAD(&session->sess_list); |
490 | INIT_WORK(&session->unblock_work, __iscsi_unblock_session); | ||
491 | INIT_WORK(&session->block_work, __iscsi_block_session); | ||
466 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); | 492 | INIT_WORK(&session->unbind_work, __iscsi_unbind_session); |
467 | INIT_WORK(&session->scan_work, iscsi_scan_session); | 493 | INIT_WORK(&session->scan_work, iscsi_scan_session); |
468 | spin_lock_init(&session->lock); | 494 | spin_lock_init(&session->lock); |
@@ -575,24 +601,25 @@ void iscsi_remove_session(struct iscsi_cls_session *session) | |||
575 | list_del(&session->sess_list); | 601 | list_del(&session->sess_list); |
576 | spin_unlock_irqrestore(&sesslock, flags); | 602 | spin_unlock_irqrestore(&sesslock, flags); |
577 | 603 | ||
604 | /* make sure there are no blocks/unblocks queued */ | ||
605 | flush_workqueue(iscsi_eh_timer_workq); | ||
606 | /* make sure the timedout callout is not running */ | ||
607 | if (!cancel_delayed_work(&session->recovery_work)) | ||
608 | flush_workqueue(iscsi_eh_timer_workq); | ||
578 | /* | 609 | /* |
579 | * If we are blocked let commands flow again. The lld or iscsi | 610 | * If we are blocked let commands flow again. The lld or iscsi |
580 | * layer should set up the queuecommand to fail commands. | 611 | * layer should set up the queuecommand to fail commands. |
612 | * We assume that the LLD will not be calling block/unblock while | ||
613 | * removing the session. | ||
581 | */ | 614 | */ |
582 | spin_lock_irqsave(&session->lock, flags); | 615 | spin_lock_irqsave(&session->lock, flags); |
583 | session->state = ISCSI_SESSION_FREE; | 616 | session->state = ISCSI_SESSION_FREE; |
584 | spin_unlock_irqrestore(&session->lock, flags); | 617 | spin_unlock_irqrestore(&session->lock, flags); |
585 | __iscsi_unblock_session(session); | ||
586 | __iscsi_unbind_session(&session->unbind_work); | ||
587 | 618 | ||
588 | /* flush running scans */ | 619 | scsi_target_unblock(&session->dev); |
620 | /* flush running scans then delete devices */ | ||
589 | flush_workqueue(ihost->scan_workq); | 621 | flush_workqueue(ihost->scan_workq); |
590 | /* | 622 | __iscsi_unbind_session(&session->unbind_work); |
591 | * If the session dropped while removing devices then we need to make | ||
592 | * sure it is not blocked | ||
593 | */ | ||
594 | if (!cancel_delayed_work(&session->recovery_work)) | ||
595 | flush_workqueue(iscsi_eh_timer_workq); | ||
596 | 623 | ||
597 | /* hw iscsi may not have removed all connections from session */ | 624 | /* hw iscsi may not have removed all connections from session */ |
598 | err = device_for_each_child(&session->dev, NULL, | 625 | err = device_for_each_child(&session->dev, NULL, |
@@ -802,23 +829,16 @@ EXPORT_SYMBOL_GPL(iscsi_recv_pdu); | |||
802 | 829 | ||
803 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) | 830 | void iscsi_conn_error(struct iscsi_cls_conn *conn, enum iscsi_err error) |
804 | { | 831 | { |
805 | struct iscsi_cls_session *session = iscsi_conn_to_session(conn); | ||
806 | struct nlmsghdr *nlh; | 832 | struct nlmsghdr *nlh; |
807 | struct sk_buff *skb; | 833 | struct sk_buff *skb; |
808 | struct iscsi_uevent *ev; | 834 | struct iscsi_uevent *ev; |
809 | struct iscsi_internal *priv; | 835 | struct iscsi_internal *priv; |
810 | int len = NLMSG_SPACE(sizeof(*ev)); | 836 | int len = NLMSG_SPACE(sizeof(*ev)); |
811 | unsigned long flags; | ||
812 | 837 | ||
813 | priv = iscsi_if_transport_lookup(conn->transport); | 838 | priv = iscsi_if_transport_lookup(conn->transport); |
814 | if (!priv) | 839 | if (!priv) |
815 | return; | 840 | return; |
816 | 841 | ||
817 | spin_lock_irqsave(&session->lock, flags); | ||
818 | if (session->state == ISCSI_SESSION_LOGGED_IN) | ||
819 | session->state = ISCSI_SESSION_FAILED; | ||
820 | spin_unlock_irqrestore(&session->lock, flags); | ||
821 | |||
822 | skb = alloc_skb(len, GFP_ATOMIC); | 842 | skb = alloc_skb(len, GFP_ATOMIC); |
823 | if (!skb) { | 843 | if (!skb) { |
824 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " | 844 | iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " |
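The transport hunks above move session block/unblock into work items on iscsi_eh_timer_workq: the state change and scsi_target_block()/unblock() run from workqueue context, and iscsi_unblock_session() stays synchronous for its caller by flushing the queue afterwards. A stripped-down sketch of that queue-then-flush pattern; the demo_* names are illustrative, not the transport class API:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_session {
        struct work_struct unblock_work;
};

static struct workqueue_struct *demo_eh_wq;

static void demo_unblock_worker(struct work_struct *work)
{
        struct demo_session *sess =
                container_of(work, struct demo_session, unblock_work);

        /* mark sess logged in and unblock its scsi target here */
        (void)sess;
}

static int demo_session_init(struct demo_session *sess)
{
        demo_eh_wq = create_singlethread_workqueue("demo_eh");
        if (!demo_eh_wq)
                return -ENOMEM;
        INIT_WORK(&sess->unblock_work, demo_unblock_worker);
        return 0;
}

static void demo_unblock_session(struct demo_session *sess)
{
        queue_work(demo_eh_wq, &sess->unblock_work);
        /* wait until the queued event (and anything ahead of it) has run */
        flush_workqueue(demo_eh_wq);
}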
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 0f5619611b8d..931992763e68 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -3,6 +3,7 @@ | |||
3 | */ | 3 | */ |
4 | 4 | ||
5 | #include <linux/mount.h> | 5 | #include <linux/mount.h> |
6 | #include <linux/security.h> | ||
6 | 7 | ||
7 | struct nfs_string; | 8 | struct nfs_string; |
8 | 9 | ||
@@ -57,6 +58,8 @@ struct nfs_parsed_mount_data { | |||
57 | char *export_path; | 58 | char *export_path; |
58 | int protocol; | 59 | int protocol; |
59 | } nfs_server; | 60 | } nfs_server; |
61 | |||
62 | struct security_mnt_opts lsm_opts; | ||
60 | }; | 63 | }; |
61 | 64 | ||
62 | /* client.c */ | 65 | /* client.c */ |
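The new lsm_opts member follows an init/parse/consume/free lifecycle spelled out across the fs/nfs/super.c hunks below and the include/linux/security.h hunk at the end of this patch. A compressed sketch of that flow, assuming the helpers this series introduces behave as their call sites suggest:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/security.h>

static int demo_mount_security(char *raw_opts, struct super_block *sb)
{
        struct security_mnt_opts opts;
        char *secdata;
        int rc;

        security_init_mnt_opts(&opts);

        secdata = alloc_secdata();
        if (!secdata)
                return -ENOMEM;
        rc = security_sb_copy_data(raw_opts, secdata);
        if (!rc)
                rc = security_sb_parse_opts_str(secdata, &opts);
        free_secdata(secdata);
        if (rc)
                goto out;

        rc = security_sb_set_mnt_opts(sb, &opts);
out:
        security_free_mnt_opts(&opts);
        return rc;
}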
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 1fb381843650..fcf4b982c885 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -684,8 +684,9 @@ static void nfs_parse_server_address(char *value, | |||
684 | static int nfs_parse_mount_options(char *raw, | 684 | static int nfs_parse_mount_options(char *raw, |
685 | struct nfs_parsed_mount_data *mnt) | 685 | struct nfs_parsed_mount_data *mnt) |
686 | { | 686 | { |
687 | char *p, *string; | 687 | char *p, *string, *secdata; |
688 | unsigned short port = 0; | 688 | unsigned short port = 0; |
689 | int rc; | ||
689 | 690 | ||
690 | if (!raw) { | 691 | if (!raw) { |
691 | dfprintk(MOUNT, "NFS: mount options string was NULL.\n"); | 692 | dfprintk(MOUNT, "NFS: mount options string was NULL.\n"); |
@@ -693,6 +694,20 @@ static int nfs_parse_mount_options(char *raw, | |||
693 | } | 694 | } |
694 | dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw); | 695 | dfprintk(MOUNT, "NFS: nfs mount opts='%s'\n", raw); |
695 | 696 | ||
697 | secdata = alloc_secdata(); | ||
698 | if (!secdata) | ||
699 | goto out_nomem; | ||
700 | |||
701 | rc = security_sb_copy_data(raw, secdata); | ||
702 | if (rc) | ||
703 | goto out_security_failure; | ||
704 | |||
705 | rc = security_sb_parse_opts_str(secdata, &mnt->lsm_opts); | ||
706 | if (rc) | ||
707 | goto out_security_failure; | ||
708 | |||
709 | free_secdata(secdata); | ||
710 | |||
696 | while ((p = strsep(&raw, ",")) != NULL) { | 711 | while ((p = strsep(&raw, ",")) != NULL) { |
697 | substring_t args[MAX_OPT_ARGS]; | 712 | substring_t args[MAX_OPT_ARGS]; |
698 | int option, token; | 713 | int option, token; |
@@ -1042,7 +1057,10 @@ static int nfs_parse_mount_options(char *raw, | |||
1042 | out_nomem: | 1057 | out_nomem: |
1043 | printk(KERN_INFO "NFS: not enough memory to parse option\n"); | 1058 | printk(KERN_INFO "NFS: not enough memory to parse option\n"); |
1044 | return 0; | 1059 | return 0; |
1045 | 1060 | out_security_failure: | |
1061 | free_secdata(secdata); | ||
1062 | printk(KERN_INFO "NFS: security options invalid: %d\n", rc); | ||
1063 | return 0; | ||
1046 | out_unrec_vers: | 1064 | out_unrec_vers: |
1047 | printk(KERN_INFO "NFS: unrecognized NFS version number\n"); | 1065 | printk(KERN_INFO "NFS: unrecognized NFS version number\n"); |
1048 | return 0; | 1066 | return 0; |
@@ -1214,6 +1232,33 @@ static int nfs_validate_mount_data(void *options, | |||
1214 | args->namlen = data->namlen; | 1232 | args->namlen = data->namlen; |
1215 | args->bsize = data->bsize; | 1233 | args->bsize = data->bsize; |
1216 | args->auth_flavors[0] = data->pseudoflavor; | 1234 | args->auth_flavors[0] = data->pseudoflavor; |
1235 | |||
1236 | /* | ||
1237 | * The legacy version 6 binary mount data from userspace has a | ||
1238 | * field used only to transport selinux information into the | ||
1239 | * kernel. To continue to support that functionality we | ||
1240 | * have a touch of selinux knowledge here in the NFS code. The | ||
1241 | * userspace code converted context=blah to just blah so we are | ||
1242 | * converting back to the full string selinux understands. | ||
1243 | */ | ||
1244 | if (data->context[0]){ | ||
1245 | #ifdef CONFIG_SECURITY_SELINUX | ||
1246 | int rc; | ||
1247 | char *opts_str = kmalloc(sizeof(data->context) + 8, GFP_KERNEL); | ||
1248 | if (!opts_str) | ||
1249 | return -ENOMEM; | ||
1250 | strcpy(opts_str, "context="); | ||
1251 | data->context[NFS_MAX_CONTEXT_LEN] = '\0'; | ||
1252 | strcat(opts_str, &data->context[0]); | ||
1253 | rc = security_sb_parse_opts_str(opts_str, &args->lsm_opts); | ||
1254 | kfree(opts_str); | ||
1255 | if (rc) | ||
1256 | return rc; | ||
1257 | #else | ||
1258 | return -EINVAL; | ||
1259 | #endif | ||
1260 | } | ||
1261 | |||
1217 | break; | 1262 | break; |
1218 | default: { | 1263 | default: { |
1219 | unsigned int len; | 1264 | unsigned int len; |
@@ -1476,6 +1521,8 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
1476 | }; | 1521 | }; |
1477 | int error; | 1522 | int error; |
1478 | 1523 | ||
1524 | security_init_mnt_opts(&data.lsm_opts); | ||
1525 | |||
1479 | /* Validate the mount data */ | 1526 | /* Validate the mount data */ |
1480 | error = nfs_validate_mount_data(raw_data, &data, &mntfh, dev_name); | 1527 | error = nfs_validate_mount_data(raw_data, &data, &mntfh, dev_name); |
1481 | if (error < 0) | 1528 | if (error < 0) |
@@ -1515,6 +1562,10 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
1515 | goto error_splat_super; | 1562 | goto error_splat_super; |
1516 | } | 1563 | } |
1517 | 1564 | ||
1565 | error = security_sb_set_mnt_opts(s, &data.lsm_opts); | ||
1566 | if (error) | ||
1567 | goto error_splat_root; | ||
1568 | |||
1518 | s->s_flags |= MS_ACTIVE; | 1569 | s->s_flags |= MS_ACTIVE; |
1519 | mnt->mnt_sb = s; | 1570 | mnt->mnt_sb = s; |
1520 | mnt->mnt_root = mntroot; | 1571 | mnt->mnt_root = mntroot; |
@@ -1523,12 +1574,15 @@ static int nfs_get_sb(struct file_system_type *fs_type, | |||
1523 | out: | 1574 | out: |
1524 | kfree(data.nfs_server.hostname); | 1575 | kfree(data.nfs_server.hostname); |
1525 | kfree(data.mount_server.hostname); | 1576 | kfree(data.mount_server.hostname); |
1577 | security_free_mnt_opts(&data.lsm_opts); | ||
1526 | return error; | 1578 | return error; |
1527 | 1579 | ||
1528 | out_err_nosb: | 1580 | out_err_nosb: |
1529 | nfs_free_server(server); | 1581 | nfs_free_server(server); |
1530 | goto out; | 1582 | goto out; |
1531 | 1583 | ||
1584 | error_splat_root: | ||
1585 | dput(mntroot); | ||
1532 | error_splat_super: | 1586 | error_splat_super: |
1533 | up_write(&s->s_umount); | 1587 | up_write(&s->s_umount); |
1534 | deactivate_super(s); | 1588 | deactivate_super(s); |
@@ -1608,6 +1662,9 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags, | |||
1608 | mnt->mnt_sb = s; | 1662 | mnt->mnt_sb = s; |
1609 | mnt->mnt_root = mntroot; | 1663 | mnt->mnt_root = mntroot; |
1610 | 1664 | ||
1665 | /* clone any lsm security options from the parent to the new sb */ | ||
1666 | security_sb_clone_mnt_opts(data->sb, s); | ||
1667 | |||
1611 | dprintk("<-- nfs_xdev_get_sb() = 0\n"); | 1668 | dprintk("<-- nfs_xdev_get_sb() = 0\n"); |
1612 | return 0; | 1669 | return 0; |
1613 | 1670 | ||
@@ -1850,6 +1907,8 @@ static int nfs4_get_sb(struct file_system_type *fs_type, | |||
1850 | }; | 1907 | }; |
1851 | int error; | 1908 | int error; |
1852 | 1909 | ||
1910 | security_init_mnt_opts(&data.lsm_opts); | ||
1911 | |||
1853 | /* Validate the mount data */ | 1912 | /* Validate the mount data */ |
1854 | error = nfs4_validate_mount_data(raw_data, &data, dev_name); | 1913 | error = nfs4_validate_mount_data(raw_data, &data, dev_name); |
1855 | if (error < 0) | 1914 | if (error < 0) |
@@ -1898,6 +1957,7 @@ out: | |||
1898 | kfree(data.client_address); | 1957 | kfree(data.client_address); |
1899 | kfree(data.nfs_server.export_path); | 1958 | kfree(data.nfs_server.export_path); |
1900 | kfree(data.nfs_server.hostname); | 1959 | kfree(data.nfs_server.hostname); |
1960 | security_free_mnt_opts(&data.lsm_opts); | ||
1901 | return error; | 1961 | return error; |
1902 | 1962 | ||
1903 | out_free: | 1963 | out_free: |
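The binary-mount-data branch above rebuilds the full "context=" option string from the bare label carried in the legacy v6 mount structure before handing it to security_sb_parse_opts_str(). A user-space sketch of just that string assembly; the value of NFS_MAX_CONTEXT_LEN is assumed here:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NFS_MAX_CONTEXT_LEN 256                 /* assumed for the sketch */

static char *rebuild_context_opt(const char *context)
{
        char *opts_str = malloc(NFS_MAX_CONTEXT_LEN + sizeof("context="));

        if (!opts_str)
                return NULL;
        strcpy(opts_str, "context=");
        strncat(opts_str, context, NFS_MAX_CONTEXT_LEN);
        return opts_str;
}

int main(void)
{
        char *opt = rebuild_context_opt("system_u:object_r:nfs_t:s0");

        if (!opt)
                return 1;
        printf("%s\n", opt);    /* context=system_u:object_r:nfs_t:s0 */
        free(opt);
        return 0;
}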
diff --git a/fs/super.c b/fs/super.c index 88811f60c8de..010446d8c40a 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -870,12 +870,12 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void | |||
870 | if (!mnt) | 870 | if (!mnt) |
871 | goto out; | 871 | goto out; |
872 | 872 | ||
873 | if (data) { | 873 | if (data && !(type->fs_flags & FS_BINARY_MOUNTDATA)) { |
874 | secdata = alloc_secdata(); | 874 | secdata = alloc_secdata(); |
875 | if (!secdata) | 875 | if (!secdata) |
876 | goto out_mnt; | 876 | goto out_mnt; |
877 | 877 | ||
878 | error = security_sb_copy_data(type, data, secdata); | 878 | error = security_sb_copy_data(data, secdata); |
879 | if (error) | 879 | if (error) |
880 | goto out_free_secdata; | 880 | goto out_free_secdata; |
881 | } | 881 | } |
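vfs_kern_mount() now hands mount data to the LSM copy hook only when it is a plain text option string; filesystems flagged FS_BINARY_MOUNTDATA (such as NFS with legacy binary mount data) extract their security options themselves, as the NFS hunks above do. A condensed sketch of the guard, not the vfs_kern_mount() code itself:

#include <linux/fs.h>
#include <linux/security.h>

/* Returns 0 without touching secdata when the filesystem passes binary
 * mount data; only text option strings reach the LSM here. */
static int demo_copy_lsm_opts(struct file_system_type *type, void *data,
                              char *secdata)
{
        if (!data || (type->fs_flags & FS_BINARY_MOUNTDATA))
                return 0;
        return security_sb_copy_data(data, secdata);
}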
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c index f01b07687faf..8e09b71f4104 100644 --- a/fs/xfs/xfs_iget.c +++ b/fs/xfs/xfs_iget.c | |||
@@ -235,6 +235,7 @@ finish_inode: | |||
235 | */ | 235 | */ |
236 | new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP); | 236 | new_icl = kmem_zone_alloc(xfs_icluster_zone, KM_SLEEP); |
237 | if (radix_tree_preload(GFP_KERNEL)) { | 237 | if (radix_tree_preload(GFP_KERNEL)) { |
238 | xfs_idestroy(ip); | ||
238 | delay(1); | 239 | delay(1); |
239 | goto again; | 240 | goto again; |
240 | } | 241 | } |
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index 4d6330eddc8d..76d470d8a1e6 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -261,16 +261,19 @@ xfsaild_push( | |||
261 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); | 261 | xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE); |
262 | } | 262 | } |
263 | 263 | ||
264 | /* | 264 | if (!count) { |
265 | * We reached the target so wait a bit longer for I/O to complete and | 265 | /* We're past our target or empty, so idle */ |
266 | * remove pushed items from the AIL before we start the next scan from | 266 | tout = 1000; |
267 | * the start of the AIL. | 267 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { |
268 | */ | 268 | /* |
269 | if ((XFS_LSN_CMP(lsn, target) >= 0)) { | 269 | * We reached the target so wait a bit longer for I/O to |
270 | * complete and remove pushed items from the AIL before we | ||
271 | * start the next scan from the start of the AIL. | ||
272 | */ | ||
270 | tout += 20; | 273 | tout += 20; |
271 | last_pushed_lsn = 0; | 274 | last_pushed_lsn = 0; |
272 | } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) || | 275 | } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) || |
273 | (count && ((stuck * 100) / count > 90))) { | 276 | ((stuck * 100) / count > 90)) { |
274 | /* | 277 | /* |
275 | * Either there is a lot of contention on the AIL or we | 278 | * Either there is a lot of contention on the AIL or we |
276 | * are stuck due to operations in progress. "Stuck" in this | 279 | * are stuck due to operations in progress. "Stuck" in this |
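The xfsaild_push() rework above gives an empty AIL its own idle path (a full second) and only evaluates the stuck ratio once count is known to be non-zero. The visible part of that back-off decision, reduced to a stand-alone sketch; the contention/stuck handling continues beyond this hunk and is left as a pass-through:

#include <stdio.h>

/* tout is the current delay in milliseconds */
static long xfsaild_tout_sketch(long count, int reached_target, long tout)
{
        if (!count)
                return 1000;        /* nothing left to push: idle for a second */
        if (reached_target)
                return tout + 20;   /* give pushed items time to complete I/O */
        return tout;                /* contention/stuck cases handled past this hunk */
}

int main(void)
{
        printf("%ld %ld\n", xfsaild_tout_sketch(0, 0, 10),
               xfsaild_tout_sketch(8, 1, 10));          /* prints "1000 30" */
        return 0;
}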
diff --git a/include/asm-blackfin/mmu_context.h b/include/asm-blackfin/mmu_context.h index b5eb67596ad5..f55ec3c23a92 100644 --- a/include/asm-blackfin/mmu_context.h +++ b/include/asm-blackfin/mmu_context.h | |||
@@ -73,7 +73,7 @@ static inline void destroy_context(struct mm_struct *mm) | |||
73 | struct sram_list_struct *tmp; | 73 | struct sram_list_struct *tmp; |
74 | 74 | ||
75 | if (current_l1_stack_save == mm->context.l1_stack_save) | 75 | if (current_l1_stack_save == mm->context.l1_stack_save) |
76 | current_l1_stack_save = 0; | 76 | current_l1_stack_save = NULL; |
77 | if (mm->context.l1_stack_save) | 77 | if (mm->context.l1_stack_save) |
78 | free_l1stack(); | 78 | free_l1stack(); |
79 | 79 | ||
diff --git a/include/asm-blackfin/unistd.h b/include/asm-blackfin/unistd.h index e98167358d26..c18a399f6e3e 100644 --- a/include/asm-blackfin/unistd.h +++ b/include/asm-blackfin/unistd.h | |||
@@ -361,7 +361,7 @@ | |||
361 | #define __NR_epoll_pwait 346 | 361 | #define __NR_epoll_pwait 346 |
362 | #define __NR_utimensat 347 | 362 | #define __NR_utimensat 347 |
363 | #define __NR_signalfd 348 | 363 | #define __NR_signalfd 348 |
364 | #define __NR_timerfd 349 | 364 | #define __NR_timerfd_create 349 |
365 | #define __NR_eventfd 350 | 365 | #define __NR_eventfd 350 |
366 | #define __NR_pread64 351 | 366 | #define __NR_pread64 351 |
367 | #define __NR_pwrite64 352 | 367 | #define __NR_pwrite64 352 |
@@ -370,8 +370,10 @@ | |||
370 | #define __NR_get_robust_list 355 | 370 | #define __NR_get_robust_list 355 |
371 | #define __NR_fallocate 356 | 371 | #define __NR_fallocate 356 |
372 | #define __NR_semtimedop 357 | 372 | #define __NR_semtimedop 357 |
373 | #define __NR_timerfd_settime 358 | ||
374 | #define __NR_timerfd_gettime 359 | ||
373 | 375 | ||
374 | #define __NR_syscall 358 | 376 | #define __NR_syscall 360 |
375 | #define NR_syscalls __NR_syscall | 377 | #define NR_syscalls __NR_syscall |
376 | 378 | ||
377 | /* Old optional stuff no one actually uses */ | 379 | /* Old optional stuff no one actually uses */ |
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h index 8233b3a964c6..d03bf9ff68e3 100644 --- a/include/asm-ia64/kprobes.h +++ b/include/asm-ia64/kprobes.h | |||
@@ -117,7 +117,7 @@ struct arch_specific_insn { | |||
117 | unsigned short slot; | 117 | unsigned short slot; |
118 | }; | 118 | }; |
119 | 119 | ||
120 | extern int kprobes_fault_handler(struct pt_regs *regs, int trapnr); | 120 | extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); |
121 | extern int kprobe_exceptions_notify(struct notifier_block *self, | 121 | extern int kprobe_exceptions_notify(struct notifier_block *self, |
122 | unsigned long val, void *data); | 122 | unsigned long val, void *data); |
123 | 123 | ||
diff --git a/include/asm-ia64/ptrace.h b/include/asm-ia64/ptrace.h index 0bdce7dde1b0..4b2a8d40ebc5 100644 --- a/include/asm-ia64/ptrace.h +++ b/include/asm-ia64/ptrace.h | |||
@@ -233,8 +233,6 @@ struct switch_stack { | |||
233 | #include <asm/current.h> | 233 | #include <asm/current.h> |
234 | #include <asm/page.h> | 234 | #include <asm/page.h> |
235 | 235 | ||
236 | #define __ARCH_SYS_PTRACE 1 | ||
237 | |||
238 | /* | 236 | /* |
239 | * We use the ia64_psr(regs)->ri to determine which of the three | 237 | * We use the ia64_psr(regs)->ri to determine which of the three |
240 | * instructions in bundle (16 bytes) took the sample. Generate | 238 | * instructions in bundle (16 bytes) took the sample. Generate |
@@ -314,6 +312,13 @@ struct switch_stack { | |||
314 | #define arch_ptrace_attach(child) \ | 312 | #define arch_ptrace_attach(child) \ |
315 | ptrace_attach_sync_user_rbs(child) | 313 | ptrace_attach_sync_user_rbs(child) |
316 | 314 | ||
315 | #define arch_has_single_step() (1) | ||
316 | extern void user_enable_single_step(struct task_struct *); | ||
317 | extern void user_disable_single_step(struct task_struct *); | ||
318 | |||
319 | #define arch_has_block_step() (1) | ||
320 | extern void user_enable_block_step(struct task_struct *); | ||
321 | |||
317 | #endif /* !__KERNEL__ */ | 322 | #endif /* !__KERNEL__ */ |
318 | 323 | ||
319 | /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ | 324 | /* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */ |
diff --git a/include/asm-mn10300/Kbuild b/include/asm-mn10300/Kbuild index 79384c537dc6..c68e1680da01 100644 --- a/include/asm-mn10300/Kbuild +++ b/include/asm-mn10300/Kbuild | |||
@@ -1,5 +1 @@ | |||
1 | include include/asm-generic/Kbuild.asm | include include/asm-generic/Kbuild.asm | |
2 | |||
3 | unifdef-y += termios.h | ||
4 | unifdef-y += ptrace.h | ||
5 | unifdef-y += page.h | ||
diff --git a/include/asm-x86/Kbuild b/include/asm-x86/Kbuild index b04a7ff46df1..3b8160a2b47e 100644 --- a/include/asm-x86/Kbuild +++ b/include/asm-x86/Kbuild | |||
@@ -16,7 +16,6 @@ unifdef-y += ist.h | |||
16 | unifdef-y += mce.h | 16 | unifdef-y += mce.h |
17 | unifdef-y += msr.h | 17 | unifdef-y += msr.h |
18 | unifdef-y += mtrr.h | 18 | unifdef-y += mtrr.h |
19 | unifdef-y += page.h | ||
20 | unifdef-y += posix_types_32.h | 19 | unifdef-y += posix_types_32.h |
21 | unifdef-y += posix_types_64.h | 20 | unifdef-y += posix_types_64.h |
22 | unifdef-y += ptrace.h | 21 | unifdef-y += ptrace.h |
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2ba42cd7d6aa..a8f12644a13c 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <crypto/algapi.h> | 16 | #include <crypto/algapi.h> |
17 | #include <crypto/skcipher.h> | 17 | #include <crypto/skcipher.h> |
18 | #include <linux/init.h> | ||
18 | #include <linux/types.h> | 19 | #include <linux/types.h> |
19 | 20 | ||
20 | struct rtattr; | 21 | struct rtattr; |
@@ -64,6 +65,11 @@ void skcipher_geniv_free(struct crypto_instance *inst); | |||
64 | int skcipher_geniv_init(struct crypto_tfm *tfm); | 65 | int skcipher_geniv_init(struct crypto_tfm *tfm); |
65 | void skcipher_geniv_exit(struct crypto_tfm *tfm); | 66 | void skcipher_geniv_exit(struct crypto_tfm *tfm); |
66 | 67 | ||
68 | int __init eseqiv_module_init(void); | ||
69 | void __exit eseqiv_module_exit(void); | ||
70 | int __init chainiv_module_init(void); | ||
71 | void __exit chainiv_module_exit(void); | ||
72 | |||
67 | static inline struct crypto_ablkcipher *skcipher_geniv_cipher( | 73 | static inline struct crypto_ablkcipher *skcipher_geniv_cipher( |
68 | struct crypto_ablkcipher *geniv) | 74 | struct crypto_ablkcipher *geniv) |
69 | { | 75 | { |
diff --git a/include/linux/security.h b/include/linux/security.h index fe52cdeab0a6..b07357ca2137 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -34,12 +34,6 @@ | |||
34 | #include <linux/xfrm.h> | 34 | #include <linux/xfrm.h> |
35 | #include <net/flow.h> | 35 | #include <net/flow.h> |
36 | 36 | ||
37 | /* only a char in selinux superblock security struct flags */ | ||
38 | #define FSCONTEXT_MNT 0x01 | ||
39 | #define CONTEXT_MNT 0x02 | ||
40 | #define ROOTCONTEXT_MNT 0x04 | ||
41 | #define DEFCONTEXT_MNT 0x08 | ||
42 | |||
43 | extern unsigned securebits; | 37 | extern unsigned securebits; |
44 | 38 | ||
45 | struct ctl_table; | 39 | struct ctl_table; |
@@ -114,6 +108,32 @@ struct request_sock; | |||
114 | 108 | ||
115 | #ifdef CONFIG_SECURITY | 109 | #ifdef CONFIG_SECURITY |
116 | 110 | ||
111 | struct security_mnt_opts { | ||
112 | char **mnt_opts; | ||
113 | int *mnt_opts_flags; | ||
114 | int num_mnt_opts; | ||
115 | }; | ||
116 | |||
117 | static inline void security_init_mnt_opts(struct security_mnt_opts *opts) | ||
118 | { | ||
119 | opts->mnt_opts = NULL; | ||
120 | opts->mnt_opts_flags = NULL; | ||
121 | opts->num_mnt_opts = 0; | ||
122 | } | ||
123 | |||
124 | static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | ||
125 | { | ||
126 | int i; | ||
127 | if (opts->mnt_opts) | ||
128 | for(i = 0; i < opts->num_mnt_opts; i++) | ||
129 | kfree(opts->mnt_opts[i]); | ||
130 | kfree(opts->mnt_opts); | ||
131 | opts->mnt_opts = NULL; | ||
132 | kfree(opts->mnt_opts_flags); | ||
133 | opts->mnt_opts_flags = NULL; | ||
134 | opts->num_mnt_opts = 0; | ||
135 | } | ||
136 | |||
117 | /** | 137 | /** |
118 | * struct security_operations - main security structure | 138 | * struct security_operations - main security structure |
119 | * | 139 | * |
@@ -262,19 +282,19 @@ struct request_sock; | |||
262 | * @sb_get_mnt_opts: | 282 | * @sb_get_mnt_opts: |
263 | * Get the security relevant mount options used for a superblock | 283 | * Get the security relevant mount options used for a superblock |
264 | * @sb the superblock to get security mount options from | 284 | * @sb the superblock to get security mount options from |
265 | * @mount_options array for pointers to mount options | 285 | * @opts binary data structure containing all lsm mount data |
266 | * @mount_flags array of ints specifying what each mount options is | ||
267 | * @num_opts number of options in the arrays | ||
268 | * @sb_set_mnt_opts: | 286 | * @sb_set_mnt_opts: |
269 | * Set the security relevant mount options used for a superblock | 287 | * Set the security relevant mount options used for a superblock |
270 | * @sb the superblock to set security mount options for | 288 | * @sb the superblock to set security mount options for |
271 | * @mount_options array for pointers to mount options | 289 | * @opts binary data structure containing all lsm mount data |
272 | * @mount_flags array of ints specifying what each mount options is | ||
273 | * @num_opts number of options in the arrays | ||
274 | * @sb_clone_mnt_opts: | 290 | * @sb_clone_mnt_opts: |
275 | * Copy all security options from a given superblock to another | 291 | * Copy all security options from a given superblock to another |
276 | * @oldsb old superblock which contains information to clone | 292 | * @oldsb old superblock which contains information to clone |
277 | * @newsb new superblock which needs filled in | 293 | * @newsb new superblock which needs filled in |
294 | * @sb_parse_opts_str: | ||
295 | * Parse a string of security data filling in the opts structure | ||
296 | * @options string containing all mount options known by the LSM | ||
297 | * @opts binary data structure usable by the LSM | ||
278 | * | 298 | * |
279 | * Security hooks for inode operations. | 299 | * Security hooks for inode operations. |
280 | * | 300 | * |
@@ -1238,8 +1258,7 @@ struct security_operations { | |||
1238 | 1258 | ||
1239 | int (*sb_alloc_security) (struct super_block * sb); | 1259 | int (*sb_alloc_security) (struct super_block * sb); |
1240 | void (*sb_free_security) (struct super_block * sb); | 1260 | void (*sb_free_security) (struct super_block * sb); |
1241 | int (*sb_copy_data)(struct file_system_type *type, | 1261 | int (*sb_copy_data)(char *orig, char *copy); |
1242 | void *orig, void *copy); | ||
1243 | int (*sb_kern_mount) (struct super_block *sb, void *data); | 1262 | int (*sb_kern_mount) (struct super_block *sb, void *data); |
1244 | int (*sb_statfs) (struct dentry *dentry); | 1263 | int (*sb_statfs) (struct dentry *dentry); |
1245 | int (*sb_mount) (char *dev_name, struct nameidata * nd, | 1264 | int (*sb_mount) (char *dev_name, struct nameidata * nd, |
@@ -1257,12 +1276,12 @@ struct security_operations { | |||
1257 | void (*sb_post_pivotroot) (struct nameidata * old_nd, | 1276 | void (*sb_post_pivotroot) (struct nameidata * old_nd, |
1258 | struct nameidata * new_nd); | 1277 | struct nameidata * new_nd); |
1259 | int (*sb_get_mnt_opts) (const struct super_block *sb, | 1278 | int (*sb_get_mnt_opts) (const struct super_block *sb, |
1260 | char ***mount_options, int **flags, | 1279 | struct security_mnt_opts *opts); |
1261 | int *num_opts); | 1280 | int (*sb_set_mnt_opts) (struct super_block *sb, |
1262 | int (*sb_set_mnt_opts) (struct super_block *sb, char **mount_options, | 1281 | struct security_mnt_opts *opts); |
1263 | int *flags, int num_opts); | ||
1264 | void (*sb_clone_mnt_opts) (const struct super_block *oldsb, | 1282 | void (*sb_clone_mnt_opts) (const struct super_block *oldsb, |
1265 | struct super_block *newsb); | 1283 | struct super_block *newsb); |
1284 | int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts); | ||
1266 | 1285 | ||
1267 | int (*inode_alloc_security) (struct inode *inode); | 1286 | int (*inode_alloc_security) (struct inode *inode); |
1268 | void (*inode_free_security) (struct inode *inode); | 1287 | void (*inode_free_security) (struct inode *inode); |
@@ -1507,7 +1526,7 @@ int security_bprm_check(struct linux_binprm *bprm); | |||
1507 | int security_bprm_secureexec(struct linux_binprm *bprm); | 1526 | int security_bprm_secureexec(struct linux_binprm *bprm); |
1508 | int security_sb_alloc(struct super_block *sb); | 1527 | int security_sb_alloc(struct super_block *sb); |
1509 | void security_sb_free(struct super_block *sb); | 1528 | void security_sb_free(struct super_block *sb); |
1510 | int security_sb_copy_data(struct file_system_type *type, void *orig, void *copy); | 1529 | int security_sb_copy_data(char *orig, char *copy); |
1511 | int security_sb_kern_mount(struct super_block *sb, void *data); | 1530 | int security_sb_kern_mount(struct super_block *sb, void *data); |
1512 | int security_sb_statfs(struct dentry *dentry); | 1531 | int security_sb_statfs(struct dentry *dentry); |
1513 | int security_sb_mount(char *dev_name, struct nameidata *nd, | 1532 | int security_sb_mount(char *dev_name, struct nameidata *nd, |
@@ -1520,12 +1539,12 @@ void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *d | |||
1520 | void security_sb_post_addmount(struct vfsmount *mnt, struct nameidata *mountpoint_nd); | 1539 | void security_sb_post_addmount(struct vfsmount *mnt, struct nameidata *mountpoint_nd); |
1521 | int security_sb_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); | 1540 | int security_sb_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); |
1522 | void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); | 1541 | void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_nd); |
1523 | int security_sb_get_mnt_opts(const struct super_block *sb, char ***mount_options, | 1542 | int security_sb_get_mnt_opts(const struct super_block *sb, |
1524 | int **flags, int *num_opts); | 1543 | struct security_mnt_opts *opts); |
1525 | int security_sb_set_mnt_opts(struct super_block *sb, char **mount_options, | 1544 | int security_sb_set_mnt_opts(struct super_block *sb, struct security_mnt_opts *opts); |
1526 | int *flags, int num_opts); | ||
1527 | void security_sb_clone_mnt_opts(const struct super_block *oldsb, | 1545 | void security_sb_clone_mnt_opts(const struct super_block *oldsb, |
1528 | struct super_block *newsb); | 1546 | struct super_block *newsb); |
1547 | int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); | ||
1529 | 1548 | ||
1530 | int security_inode_alloc(struct inode *inode); | 1549 | int security_inode_alloc(struct inode *inode); |
1531 | void security_inode_free(struct inode *inode); | 1550 | void security_inode_free(struct inode *inode); |
@@ -1635,6 +1654,16 @@ int security_secctx_to_secid(char *secdata, u32 seclen, u32 *secid); | |||
1635 | void security_release_secctx(char *secdata, u32 seclen); | 1654 | void security_release_secctx(char *secdata, u32 seclen); |
1636 | 1655 | ||
1637 | #else /* CONFIG_SECURITY */ | 1656 | #else /* CONFIG_SECURITY */ |
1657 | struct security_mnt_opts { | ||
1658 | }; | ||
1659 | |||
1660 | static inline void security_init_mnt_opts(struct security_mnt_opts *opts) | ||
1661 | { | ||
1662 | } | ||
1663 | |||
1664 | static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | ||
1665 | { | ||
1666 | } | ||
1638 | 1667 | ||
1639 | /* | 1668 | /* |
1640 | * This is the default capabilities functionality. Most of these functions | 1669 | * This is the default capabilities functionality. Most of these functions |
@@ -1762,8 +1791,7 @@ static inline int security_sb_alloc (struct super_block *sb) | |||
1762 | static inline void security_sb_free (struct super_block *sb) | 1791 | static inline void security_sb_free (struct super_block *sb) |
1763 | { } | 1792 | { } |
1764 | 1793 | ||
1765 | static inline int security_sb_copy_data (struct file_system_type *type, | 1794 | static inline int security_sb_copy_data (char *orig, char *copy) |
1766 | void *orig, void *copy) | ||
1767 | { | 1795 | { |
1768 | return 0; | 1796 | return 0; |
1769 | } | 1797 | } |
@@ -1819,6 +1847,27 @@ static inline int security_sb_pivotroot (struct nameidata *old_nd, | |||
1819 | static inline void security_sb_post_pivotroot (struct nameidata *old_nd, | 1847 | static inline void security_sb_post_pivotroot (struct nameidata *old_nd, |
1820 | struct nameidata *new_nd) | 1848 | struct nameidata *new_nd) |
1821 | { } | 1849 | { } |
1850 | static inline int security_sb_get_mnt_opts(const struct super_block *sb, | ||
1851 | struct security_mnt_opts *opts) | ||
1852 | { | ||
1853 | security_init_mnt_opts(opts); | ||
1854 | return 0; | ||
1855 | } | ||
1856 | |||
1857 | static inline int security_sb_set_mnt_opts(struct super_block *sb, | ||
1858 | struct security_mnt_opts *opts) | ||
1859 | { | ||
1860 | return 0; | ||
1861 | } | ||
1862 | |||
1863 | static inline void security_sb_clone_mnt_opts(const struct super_block *oldsb, | ||
1864 | struct super_block *newsb) | ||
1865 | { } | ||
1866 | |||
1867 | static inline int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) | ||
1868 | { | ||
1869 | return 0; | ||
1870 | } | ||
1822 | 1871 | ||
1823 | static inline int security_inode_alloc (struct inode *inode) | 1872 | static inline int security_inode_alloc (struct inode *inode) |
1824 | { | 1873 | { |
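The hunks above replace the old three-argument mount-option interface with the security_mnt_opts container and add parse/free helpers. As a rough sketch of how a filesystem whose LSM options arrive as a string would drive the new interface (the function name and calling convention here are assumptions for illustration, not taken from this patch):

#include <linux/fs.h>
#include <linux/security.h>

/*
 * Illustrative sketch only: hand a string of LSM mount options to the
 * security module via the new security_mnt_opts container.  The function
 * name and the way the option string reaches it are assumed.
 */
static int example_fill_super(struct super_block *sb, char *lsm_opts)
{
	struct security_mnt_opts opts;
	int err = 0;

	security_init_mnt_opts(&opts);

	if (lsm_opts) {
		/* let the LSM turn the option string into its binary form */
		err = security_sb_parse_opts_str(lsm_opts, &opts);
		if (err)
			goto out;
	}

	/* apply the (possibly empty) options to this superblock */
	err = security_sb_set_mnt_opts(sb, &opts);
out:
	security_free_mnt_opts(&opts);
	return err;
}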
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index 3ffd6b582a97..39e1cac24bb7 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -675,5 +675,6 @@ extern int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, | |||
675 | 675 | ||
676 | extern void sas_ssp_task_response(struct device *dev, struct sas_task *task, | 676 | extern void sas_ssp_task_response(struct device *dev, struct sas_task *task, |
677 | struct ssp_response_iu *iu); | 677 | struct ssp_response_iu *iu); |
678 | struct sas_phy *sas_find_local_phy(struct domain_device *dev); | ||
678 | 679 | ||
679 | #endif /* _SASLIB_H_ */ | 680 | #endif /* _SASLIB_H_ */ |
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h index dbc96ef4cc72..aab1eae2ec4c 100644 --- a/include/scsi/scsi_transport_iscsi.h +++ b/include/scsi/scsi_transport_iscsi.h | |||
@@ -177,6 +177,8 @@ struct iscsi_cls_session { | |||
177 | struct list_head host_list; | 177 | struct list_head host_list; |
178 | struct iscsi_transport *transport; | 178 | struct iscsi_transport *transport; |
179 | spinlock_t lock; | 179 | spinlock_t lock; |
180 | struct work_struct block_work; | ||
181 | struct work_struct unblock_work; | ||
180 | struct work_struct scan_work; | 182 | struct work_struct scan_work; |
181 | struct work_struct unbind_work; | 183 | struct work_struct unbind_work; |
182 | 184 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 3e296ed81d4d..a1b61f414228 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -322,8 +322,8 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) | |||
322 | * Call without callback_mutex or task_lock() held. May be | 322 | * Call without callback_mutex or task_lock() held. May be |
323 | * called with or without cgroup_mutex held. Thanks in part to | 323 | * called with or without cgroup_mutex held. Thanks in part to |
324 | * 'the_top_cpuset_hack', the task's cpuset pointer will never | 324 | * 'the_top_cpuset_hack', the task's cpuset pointer will never |
325 | * be NULL. This routine also might acquire callback_mutex and | 325 | * be NULL. This routine also might acquire callback_mutex during |
326 | * current->mm->mmap_sem during call. | 326 | * call. |
327 | * | 327 | * |
328 | * Reading current->cpuset->mems_generation doesn't need task_lock | 328 | * Reading current->cpuset->mems_generation doesn't need task_lock |
329 | * to guard the current->cpuset dereference, because it is guarded | 329 | * to guard the current->cpuset dereference, because it is guarded |
diff --git a/security/dummy.c b/security/dummy.c index 649326bf64ea..78d8f92310a4 100644 --- a/security/dummy.c +++ b/security/dummy.c | |||
@@ -181,8 +181,7 @@ static void dummy_sb_free_security (struct super_block *sb) | |||
181 | return; | 181 | return; |
182 | } | 182 | } |
183 | 183 | ||
184 | static int dummy_sb_copy_data (struct file_system_type *type, | 184 | static int dummy_sb_copy_data (char *orig, char *copy) |
185 | void *orig, void *copy) | ||
186 | { | 185 | { |
187 | return 0; | 186 | return 0; |
188 | } | 187 | } |
@@ -245,19 +244,17 @@ static void dummy_sb_post_pivotroot (struct nameidata *old_nd, struct nameidata | |||
245 | return; | 244 | return; |
246 | } | 245 | } |
247 | 246 | ||
248 | static int dummy_sb_get_mnt_opts(const struct super_block *sb, char ***mount_options, | 247 | static int dummy_sb_get_mnt_opts(const struct super_block *sb, |
249 | int **flags, int *num_opts) | 248 | struct security_mnt_opts *opts) |
250 | { | 249 | { |
251 | *mount_options = NULL; | 250 | security_init_mnt_opts(opts); |
252 | *flags = NULL; | ||
253 | *num_opts = 0; | ||
254 | return 0; | 251 | return 0; |
255 | } | 252 | } |
256 | 253 | ||
257 | static int dummy_sb_set_mnt_opts(struct super_block *sb, char **mount_options, | 254 | static int dummy_sb_set_mnt_opts(struct super_block *sb, |
258 | int *flags, int num_opts) | 255 | struct security_mnt_opts *opts) |
259 | { | 256 | { |
260 | if (unlikely(num_opts)) | 257 | if (unlikely(opts->num_mnt_opts)) |
261 | return -EOPNOTSUPP; | 258 | return -EOPNOTSUPP; |
262 | return 0; | 259 | return 0; |
263 | } | 260 | } |
@@ -268,6 +265,11 @@ static void dummy_sb_clone_mnt_opts(const struct super_block *oldsb, | |||
268 | return; | 265 | return; |
269 | } | 266 | } |
270 | 267 | ||
268 | static int dummy_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) | ||
269 | { | ||
270 | return 0; | ||
271 | } | ||
272 | |||
271 | static int dummy_inode_alloc_security (struct inode *inode) | 273 | static int dummy_inode_alloc_security (struct inode *inode) |
272 | { | 274 | { |
273 | return 0; | 275 | return 0; |
@@ -1028,6 +1030,7 @@ void security_fixup_ops (struct security_operations *ops) | |||
1028 | set_to_dummy_if_null(ops, sb_get_mnt_opts); | 1030 | set_to_dummy_if_null(ops, sb_get_mnt_opts); |
1029 | set_to_dummy_if_null(ops, sb_set_mnt_opts); | 1031 | set_to_dummy_if_null(ops, sb_set_mnt_opts); |
1030 | set_to_dummy_if_null(ops, sb_clone_mnt_opts); | 1032 | set_to_dummy_if_null(ops, sb_clone_mnt_opts); |
1033 | set_to_dummy_if_null(ops, sb_parse_opts_str); | ||
1031 | set_to_dummy_if_null(ops, inode_alloc_security); | 1034 | set_to_dummy_if_null(ops, inode_alloc_security); |
1032 | set_to_dummy_if_null(ops, inode_free_security); | 1035 | set_to_dummy_if_null(ops, inode_free_security); |
1033 | set_to_dummy_if_null(ops, inode_init_security); | 1036 | set_to_dummy_if_null(ops, inode_init_security); |
diff --git a/security/security.c b/security/security.c index d15e56cbaade..b1387a6b416d 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -244,10 +244,11 @@ void security_sb_free(struct super_block *sb) | |||
244 | security_ops->sb_free_security(sb); | 244 | security_ops->sb_free_security(sb); |
245 | } | 245 | } |
246 | 246 | ||
247 | int security_sb_copy_data(struct file_system_type *type, void *orig, void *copy) | 247 | int security_sb_copy_data(char *orig, char *copy) |
248 | { | 248 | { |
249 | return security_ops->sb_copy_data(type, orig, copy); | 249 | return security_ops->sb_copy_data(orig, copy); |
250 | } | 250 | } |
251 | EXPORT_SYMBOL(security_sb_copy_data); | ||
251 | 252 | ||
252 | int security_sb_kern_mount(struct super_block *sb, void *data) | 253 | int security_sb_kern_mount(struct super_block *sb, void *data) |
253 | { | 254 | { |
@@ -306,24 +307,30 @@ void security_sb_post_pivotroot(struct nameidata *old_nd, struct nameidata *new_ | |||
306 | } | 307 | } |
307 | 308 | ||
308 | int security_sb_get_mnt_opts(const struct super_block *sb, | 309 | int security_sb_get_mnt_opts(const struct super_block *sb, |
309 | char ***mount_options, | 310 | struct security_mnt_opts *opts) |
310 | int **flags, int *num_opts) | ||
311 | { | 311 | { |
312 | return security_ops->sb_get_mnt_opts(sb, mount_options, flags, num_opts); | 312 | return security_ops->sb_get_mnt_opts(sb, opts); |
313 | } | 313 | } |
314 | 314 | ||
315 | int security_sb_set_mnt_opts(struct super_block *sb, | 315 | int security_sb_set_mnt_opts(struct super_block *sb, |
316 | char **mount_options, | 316 | struct security_mnt_opts *opts) |
317 | int *flags, int num_opts) | ||
318 | { | 317 | { |
319 | return security_ops->sb_set_mnt_opts(sb, mount_options, flags, num_opts); | 318 | return security_ops->sb_set_mnt_opts(sb, opts); |
320 | } | 319 | } |
320 | EXPORT_SYMBOL(security_sb_set_mnt_opts); | ||
321 | 321 | ||
322 | void security_sb_clone_mnt_opts(const struct super_block *oldsb, | 322 | void security_sb_clone_mnt_opts(const struct super_block *oldsb, |
323 | struct super_block *newsb) | 323 | struct super_block *newsb) |
324 | { | 324 | { |
325 | security_ops->sb_clone_mnt_opts(oldsb, newsb); | 325 | security_ops->sb_clone_mnt_opts(oldsb, newsb); |
326 | } | 326 | } |
327 | EXPORT_SYMBOL(security_sb_clone_mnt_opts); | ||
328 | |||
329 | int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) | ||
330 | { | ||
331 | return security_ops->sb_parse_opts_str(options, opts); | ||
332 | } | ||
333 | EXPORT_SYMBOL(security_sb_parse_opts_str); | ||
327 | 334 | ||
328 | int security_inode_alloc(struct inode *inode) | 335 | int security_inode_alloc(struct inode *inode) |
329 | { | 336 | { |
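The new EXPORT_SYMBOL lines are what allow modular filesystems (the binary-mount-data case this series targets, e.g. NFS) to reach these helpers at all. One plausible use of the exported clone hook, sketched purely as an assumption since the real call sites are outside this patch:

/*
 * Sketch only; not a call site from this patch.  A modular filesystem
 * creating a related superblock (such as a submount) could let it
 * inherit the parent's LSM mount state:
 */
static void example_inherit_security(struct super_block *parent_sb,
				     struct super_block *child_sb)
{
	security_sb_clone_mnt_opts(parent_sb, child_sb);
}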
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 75c2e99bfb81..4bf4807f2d44 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -443,8 +443,7 @@ out: | |||
443 | * mount options, or whatever. | 443 | * mount options, or whatever. |
444 | */ | 444 | */ |
445 | static int selinux_get_mnt_opts(const struct super_block *sb, | 445 | static int selinux_get_mnt_opts(const struct super_block *sb, |
446 | char ***mount_options, int **mnt_opts_flags, | 446 | struct security_mnt_opts *opts) |
447 | int *num_opts) | ||
448 | { | 447 | { |
449 | int rc = 0, i; | 448 | int rc = 0, i; |
450 | struct superblock_security_struct *sbsec = sb->s_security; | 449 | struct superblock_security_struct *sbsec = sb->s_security; |
@@ -452,9 +451,7 @@ static int selinux_get_mnt_opts(const struct super_block *sb, | |||
452 | u32 len; | 451 | u32 len; |
453 | char tmp; | 452 | char tmp; |
454 | 453 | ||
455 | *num_opts = 0; | 454 | security_init_mnt_opts(opts); |
456 | *mount_options = NULL; | ||
457 | *mnt_opts_flags = NULL; | ||
458 | 455 | ||
459 | if (!sbsec->initialized) | 456 | if (!sbsec->initialized) |
460 | return -EINVAL; | 457 | return -EINVAL; |
@@ -470,18 +467,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb, | |||
470 | /* count the number of mount options for this sb */ | 467 | /* count the number of mount options for this sb */ |
471 | for (i = 0; i < 8; i++) { | 468 | for (i = 0; i < 8; i++) { |
472 | if (tmp & 0x01) | 469 | if (tmp & 0x01) |
473 | (*num_opts)++; | 470 | opts->num_mnt_opts++; |
474 | tmp >>= 1; | 471 | tmp >>= 1; |
475 | } | 472 | } |
476 | 473 | ||
477 | *mount_options = kcalloc(*num_opts, sizeof(char *), GFP_ATOMIC); | 474 | opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC); |
478 | if (!*mount_options) { | 475 | if (!opts->mnt_opts) { |
479 | rc = -ENOMEM; | 476 | rc = -ENOMEM; |
480 | goto out_free; | 477 | goto out_free; |
481 | } | 478 | } |
482 | 479 | ||
483 | *mnt_opts_flags = kcalloc(*num_opts, sizeof(int), GFP_ATOMIC); | 480 | opts->mnt_opts_flags = kcalloc(opts->num_mnt_opts, sizeof(int), GFP_ATOMIC); |
484 | if (!*mnt_opts_flags) { | 481 | if (!opts->mnt_opts_flags) { |
485 | rc = -ENOMEM; | 482 | rc = -ENOMEM; |
486 | goto out_free; | 483 | goto out_free; |
487 | } | 484 | } |
@@ -491,22 +488,22 @@ static int selinux_get_mnt_opts(const struct super_block *sb, | |||
491 | rc = security_sid_to_context(sbsec->sid, &context, &len); | 488 | rc = security_sid_to_context(sbsec->sid, &context, &len); |
492 | if (rc) | 489 | if (rc) |
493 | goto out_free; | 490 | goto out_free; |
494 | (*mount_options)[i] = context; | 491 | opts->mnt_opts[i] = context; |
495 | (*mnt_opts_flags)[i++] = FSCONTEXT_MNT; | 492 | opts->mnt_opts_flags[i++] = FSCONTEXT_MNT; |
496 | } | 493 | } |
497 | if (sbsec->flags & CONTEXT_MNT) { | 494 | if (sbsec->flags & CONTEXT_MNT) { |
498 | rc = security_sid_to_context(sbsec->mntpoint_sid, &context, &len); | 495 | rc = security_sid_to_context(sbsec->mntpoint_sid, &context, &len); |
499 | if (rc) | 496 | if (rc) |
500 | goto out_free; | 497 | goto out_free; |
501 | (*mount_options)[i] = context; | 498 | opts->mnt_opts[i] = context; |
502 | (*mnt_opts_flags)[i++] = CONTEXT_MNT; | 499 | opts->mnt_opts_flags[i++] = CONTEXT_MNT; |
503 | } | 500 | } |
504 | if (sbsec->flags & DEFCONTEXT_MNT) { | 501 | if (sbsec->flags & DEFCONTEXT_MNT) { |
505 | rc = security_sid_to_context(sbsec->def_sid, &context, &len); | 502 | rc = security_sid_to_context(sbsec->def_sid, &context, &len); |
506 | if (rc) | 503 | if (rc) |
507 | goto out_free; | 504 | goto out_free; |
508 | (*mount_options)[i] = context; | 505 | opts->mnt_opts[i] = context; |
509 | (*mnt_opts_flags)[i++] = DEFCONTEXT_MNT; | 506 | opts->mnt_opts_flags[i++] = DEFCONTEXT_MNT; |
510 | } | 507 | } |
511 | if (sbsec->flags & ROOTCONTEXT_MNT) { | 508 | if (sbsec->flags & ROOTCONTEXT_MNT) { |
512 | struct inode *root = sbsec->sb->s_root->d_inode; | 509 | struct inode *root = sbsec->sb->s_root->d_inode; |
@@ -515,24 +512,16 @@ static int selinux_get_mnt_opts(const struct super_block *sb, | |||
515 | rc = security_sid_to_context(isec->sid, &context, &len); | 512 | rc = security_sid_to_context(isec->sid, &context, &len); |
516 | if (rc) | 513 | if (rc) |
517 | goto out_free; | 514 | goto out_free; |
518 | (*mount_options)[i] = context; | 515 | opts->mnt_opts[i] = context; |
519 | (*mnt_opts_flags)[i++] = ROOTCONTEXT_MNT; | 516 | opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT; |
520 | } | 517 | } |
521 | 518 | ||
522 | BUG_ON(i != *num_opts); | 519 | BUG_ON(i != opts->num_mnt_opts); |
523 | 520 | ||
524 | return 0; | 521 | return 0; |
525 | 522 | ||
526 | out_free: | 523 | out_free: |
527 | /* don't leak context string if security_sid_to_context had an error */ | 524 | security_free_mnt_opts(opts); |
528 | if (*mount_options && i) | ||
529 | for (; i > 0; i--) | ||
530 | kfree((*mount_options)[i-1]); | ||
531 | kfree(*mount_options); | ||
532 | *mount_options = NULL; | ||
533 | kfree(*mnt_opts_flags); | ||
534 | *mnt_opts_flags = NULL; | ||
535 | *num_opts = 0; | ||
536 | return rc; | 525 | return rc; |
537 | } | 526 | } |
538 | 527 | ||
@@ -553,12 +542,13 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag, | |||
553 | return 1; | 542 | return 1; |
554 | return 0; | 543 | return 0; |
555 | } | 544 | } |
545 | |||
556 | /* | 546 | /* |
557 | * Allow filesystems with binary mount data to explicitly set mount point | 547 | * Allow filesystems with binary mount data to explicitly set mount point |
558 | * labeling information. | 548 | * labeling information. |
559 | */ | 549 | */ |
560 | static int selinux_set_mnt_opts(struct super_block *sb, char **mount_options, | 550 | static int selinux_set_mnt_opts(struct super_block *sb, |
561 | int *flags, int num_opts) | 551 | struct security_mnt_opts *opts) |
562 | { | 552 | { |
563 | int rc = 0, i; | 553 | int rc = 0, i; |
564 | struct task_security_struct *tsec = current->security; | 554 | struct task_security_struct *tsec = current->security; |
@@ -568,6 +558,9 @@ static int selinux_set_mnt_opts(struct super_block *sb, char **mount_options, | |||
568 | struct inode_security_struct *root_isec = inode->i_security; | 558 | struct inode_security_struct *root_isec = inode->i_security; |
569 | u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; | 559 | u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0; |
570 | u32 defcontext_sid = 0; | 560 | u32 defcontext_sid = 0; |
561 | char **mount_options = opts->mnt_opts; | ||
562 | int *flags = opts->mnt_opts_flags; | ||
563 | int num_opts = opts->num_mnt_opts; | ||
571 | 564 | ||
572 | mutex_lock(&sbsec->lock); | 565 | mutex_lock(&sbsec->lock); |
573 | 566 | ||
@@ -589,6 +582,21 @@ static int selinux_set_mnt_opts(struct super_block *sb, char **mount_options, | |||
589 | } | 582 | } |
590 | 583 | ||
591 | /* | 584 | /* |
585 | * Filesystems with binary mount data will come through this function | ||
586 | * twice: once from an explicit call and once from the generic VFS path. | ||
587 | * Since the generic VFS calls will not contain any security mount data | ||
588 | * we need to skip the double mount verification. | ||
589 | * | ||
590 | * This does open a hole in which we will not notice if the first | ||
591 | * mount using this sb set explicit options and a second mount using | ||
592 | * this sb does not set any security options. (The first mount's | ||
593 | * options will be used for both mounts.) | ||
594 | */ | ||
595 | if (sbsec->initialized && (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA) | ||
596 | && (num_opts == 0)) | ||
597 | goto out; | ||
598 | |||
599 | /* | ||
592 | * parse the mount options, check if they are valid sids. | 600 | * parse the mount options, check if they are valid sids. |
593 | * also check if someone is trying to mount the same sb more | 601 | * also check if someone is trying to mount the same sb more |
594 | * than once with different security options. | 602 | * than once with different security options. |
@@ -792,43 +800,14 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb, | |||
792 | mutex_unlock(&newsbsec->lock); | 800 | mutex_unlock(&newsbsec->lock); |
793 | } | 801 | } |
794 | 802 | ||
795 | /* | 803 | int selinux_parse_opts_str(char *options, struct security_mnt_opts *opts) |
796 | * string mount options parsing and call set the sbsec | ||
797 | */ | ||
798 | static int superblock_doinit(struct super_block *sb, void *data) | ||
799 | { | 804 | { |
805 | char *p; | ||
800 | char *context = NULL, *defcontext = NULL; | 806 | char *context = NULL, *defcontext = NULL; |
801 | char *fscontext = NULL, *rootcontext = NULL; | 807 | char *fscontext = NULL, *rootcontext = NULL; |
802 | int rc = 0; | 808 | int rc, num_mnt_opts = 0; |
803 | char *p, *options = data; | ||
804 | /* selinux only know about a fixed number of mount options */ | ||
805 | char *mnt_opts[NUM_SEL_MNT_OPTS]; | ||
806 | int mnt_opts_flags[NUM_SEL_MNT_OPTS], num_mnt_opts = 0; | ||
807 | |||
808 | if (!data) | ||
809 | goto out; | ||
810 | 809 | ||
811 | /* with the nfs patch this will become a goto out; */ | 810 | opts->num_mnt_opts = 0; |
812 | if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA) { | ||
813 | const char *name = sb->s_type->name; | ||
814 | /* NFS we understand. */ | ||
815 | if (!strcmp(name, "nfs")) { | ||
816 | struct nfs_mount_data *d = data; | ||
817 | |||
818 | if (d->version != NFS_MOUNT_VERSION) | ||
819 | goto out; | ||
820 | |||
821 | if (d->context[0]) { | ||
822 | context = kstrdup(d->context, GFP_KERNEL); | ||
823 | if (!context) { | ||
824 | rc = -ENOMEM; | ||
825 | goto out; | ||
826 | } | ||
827 | } | ||
828 | goto build_flags; | ||
829 | } else | ||
830 | goto out; | ||
831 | } | ||
832 | 811 | ||
833 | /* Standard string-based options. */ | 812 | /* Standard string-based options. */ |
834 | while ((p = strsep(&options, "|")) != NULL) { | 813 | while ((p = strsep(&options, "|")) != NULL) { |
@@ -901,26 +880,37 @@ static int superblock_doinit(struct super_block *sb, void *data) | |||
901 | } | 880 | } |
902 | } | 881 | } |
903 | 882 | ||
904 | build_flags: | 883 | rc = -ENOMEM; |
884 | opts->mnt_opts = kcalloc(NUM_SEL_MNT_OPTS, sizeof(char *), GFP_ATOMIC); | ||
885 | if (!opts->mnt_opts) | ||
886 | goto out_err; | ||
887 | |||
888 | opts->mnt_opts_flags = kcalloc(NUM_SEL_MNT_OPTS, sizeof(int), GFP_ATOMIC); | ||
889 | if (!opts->mnt_opts_flags) { | ||
890 | kfree(opts->mnt_opts); | ||
891 | goto out_err; | ||
892 | } | ||
893 | |||
905 | if (fscontext) { | 894 | if (fscontext) { |
906 | mnt_opts[num_mnt_opts] = fscontext; | 895 | opts->mnt_opts[num_mnt_opts] = fscontext; |
907 | mnt_opts_flags[num_mnt_opts++] = FSCONTEXT_MNT; | 896 | opts->mnt_opts_flags[num_mnt_opts++] = FSCONTEXT_MNT; |
908 | } | 897 | } |
909 | if (context) { | 898 | if (context) { |
910 | mnt_opts[num_mnt_opts] = context; | 899 | opts->mnt_opts[num_mnt_opts] = context; |
911 | mnt_opts_flags[num_mnt_opts++] = CONTEXT_MNT; | 900 | opts->mnt_opts_flags[num_mnt_opts++] = CONTEXT_MNT; |
912 | } | 901 | } |
913 | if (rootcontext) { | 902 | if (rootcontext) { |
914 | mnt_opts[num_mnt_opts] = rootcontext; | 903 | opts->mnt_opts[num_mnt_opts] = rootcontext; |
915 | mnt_opts_flags[num_mnt_opts++] = ROOTCONTEXT_MNT; | 904 | opts->mnt_opts_flags[num_mnt_opts++] = ROOTCONTEXT_MNT; |
916 | } | 905 | } |
917 | if (defcontext) { | 906 | if (defcontext) { |
918 | mnt_opts[num_mnt_opts] = defcontext; | 907 | opts->mnt_opts[num_mnt_opts] = defcontext; |
919 | mnt_opts_flags[num_mnt_opts++] = DEFCONTEXT_MNT; | 908 | opts->mnt_opts_flags[num_mnt_opts++] = DEFCONTEXT_MNT; |
920 | } | 909 | } |
921 | 910 | ||
922 | out: | 911 | opts->num_mnt_opts = num_mnt_opts; |
923 | rc = selinux_set_mnt_opts(sb, mnt_opts, mnt_opts_flags, num_mnt_opts); | 912 | return 0; |
913 | |||
924 | out_err: | 914 | out_err: |
925 | kfree(context); | 915 | kfree(context); |
926 | kfree(defcontext); | 916 | kfree(defcontext); |
@@ -928,6 +918,33 @@ out_err: | |||
928 | kfree(rootcontext); | 918 | kfree(rootcontext); |
929 | return rc; | 919 | return rc; |
930 | } | 920 | } |
921 | /* | ||
922 | * parse the string mount options and use them to set up the sbsec ||
923 | */ | ||
924 | static int superblock_doinit(struct super_block *sb, void *data) | ||
925 | { | ||
926 | int rc = 0; | ||
927 | char *options = data; | ||
928 | struct security_mnt_opts opts; | ||
929 | |||
930 | security_init_mnt_opts(&opts); | ||
931 | |||
932 | if (!data) | ||
933 | goto out; | ||
934 | |||
935 | BUG_ON(sb->s_type->fs_flags & FS_BINARY_MOUNTDATA); | ||
936 | |||
937 | rc = selinux_parse_opts_str(options, &opts); | ||
938 | if (rc) | ||
939 | goto out_err; | ||
940 | |||
941 | out: | ||
942 | rc = selinux_set_mnt_opts(sb, &opts); | ||
943 | |||
944 | out_err: | ||
945 | security_free_mnt_opts(&opts); | ||
946 | return rc; | ||
947 | } | ||
931 | 948 | ||
932 | static inline u16 inode_mode_to_security_class(umode_t mode) | 949 | static inline u16 inode_mode_to_security_class(umode_t mode) |
933 | { | 950 | { |
@@ -2253,7 +2270,7 @@ static inline void take_selinux_option(char **to, char *from, int *first, | |||
2253 | } | 2270 | } |
2254 | } | 2271 | } |
2255 | 2272 | ||
2256 | static int selinux_sb_copy_data(struct file_system_type *type, void *orig, void *copy) | 2273 | static int selinux_sb_copy_data(char *orig, char *copy) |
2257 | { | 2274 | { |
2258 | int fnosec, fsec, rc = 0; | 2275 | int fnosec, fsec, rc = 0; |
2259 | char *in_save, *in_curr, *in_end; | 2276 | char *in_save, *in_curr, *in_end; |
@@ -2263,12 +2280,6 @@ static int selinux_sb_copy_data(struct file_system_type *type, void *orig, void | |||
2263 | in_curr = orig; | 2280 | in_curr = orig; |
2264 | sec_curr = copy; | 2281 | sec_curr = copy; |
2265 | 2282 | ||
2266 | /* Binary mount data: just copy */ | ||
2267 | if (type->fs_flags & FS_BINARY_MOUNTDATA) { | ||
2268 | copy_page(sec_curr, in_curr); | ||
2269 | goto out; | ||
2270 | } | ||
2271 | |||
2272 | nosec = (char *)get_zeroed_page(GFP_KERNEL); | 2283 | nosec = (char *)get_zeroed_page(GFP_KERNEL); |
2273 | if (!nosec) { | 2284 | if (!nosec) { |
2274 | rc = -ENOMEM; | 2285 | rc = -ENOMEM; |
@@ -5251,6 +5262,8 @@ static struct security_operations selinux_ops = { | |||
5251 | .sb_get_mnt_opts = selinux_get_mnt_opts, | 5262 | .sb_get_mnt_opts = selinux_get_mnt_opts, |
5252 | .sb_set_mnt_opts = selinux_set_mnt_opts, | 5263 | .sb_set_mnt_opts = selinux_set_mnt_opts, |
5253 | .sb_clone_mnt_opts = selinux_sb_clone_mnt_opts, | 5264 | .sb_clone_mnt_opts = selinux_sb_clone_mnt_opts, |
5265 | .sb_parse_opts_str = selinux_parse_opts_str, | ||
5266 | |||
5254 | 5267 | ||
5255 | .inode_alloc_security = selinux_inode_alloc_security, | 5268 | .inode_alloc_security = selinux_inode_alloc_security, |
5256 | .inode_free_security = selinux_inode_free_security, | 5269 | .inode_free_security = selinux_inode_free_security, |
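For reference, the selinux_parse_opts_str() factored out above keeps the existing convention of splitting the option string on '|' before matching the context= variants. A standalone sketch of just that tokenizing step, in ordinary userspace C with made-up option values:

#define _DEFAULT_SOURCE		/* strsep() is a BSD/GNU extension in glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* made-up values; real options would carry full SELinux contexts */
	char buf[] = "context=system_u:object_r:tmp_t|fscontext=system_u:object_r:fs_t";
	char *options = buf;
	char *p;

	while ((p = strsep(&options, "|")) != NULL) {
		if (!*p)
			continue;	/* ignore empty tokens */
		printf("token: %s\n", p);
	}
	return 0;
}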
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 837ce420d2f6..f7d2f03781f2 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
@@ -35,6 +35,11 @@ | |||
35 | #define POLICYDB_VERSION_MAX POLICYDB_VERSION_POLCAP | 35 | #define POLICYDB_VERSION_MAX POLICYDB_VERSION_POLCAP |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | #define CONTEXT_MNT 0x01 | ||
39 | #define FSCONTEXT_MNT 0x02 | ||
40 | #define ROOTCONTEXT_MNT 0x04 | ||
41 | #define DEFCONTEXT_MNT 0x08 | ||
42 | |||
38 | struct netlbl_lsm_secattr; | 43 | struct netlbl_lsm_secattr; |
39 | 44 | ||
40 | extern int selinux_enabled; | 45 | extern int selinux_enabled; |
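Since the four *_MNT values are distinct single bits, any combination of them fits in the one-byte flags field of the superblock security struct, which is why selinux_get_mnt_opts() can count the active options with a shift loop. A minimal sketch of that counting, mirroring the loop in the hook:

/*
 * Sketch mirroring the shift loop in selinux_get_mnt_opts(): count how
 * many of the *_MNT bits are set in the one-byte flags field.
 */
static int count_mnt_flags(unsigned char flags)
{
	int i, n = 0;

	for (i = 0; i < 8; i++) {
		if (flags & 0x01)
			n++;
		flags >>= 1;
	}
	return n;
}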
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 770eb067e165..0241fd359675 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -189,17 +189,10 @@ static void smack_sb_free_security(struct super_block *sb) | |||
189 | * Copy the Smack specific mount options out of the mount | 189 | * Copy the Smack specific mount options out of the mount |
190 | * options list. | 190 | * options list. |
191 | */ | 191 | */ |
192 | static int smack_sb_copy_data(struct file_system_type *type, void *orig, | 192 | static int smack_sb_copy_data(char *orig, char *smackopts) |
193 | void *smackopts) | ||
194 | { | 193 | { |
195 | char *cp, *commap, *otheropts, *dp; | 194 | char *cp, *commap, *otheropts, *dp; |
196 | 195 | ||
197 | /* Binary mount data: just copy */ | ||
198 | if (type->fs_flags & FS_BINARY_MOUNTDATA) { | ||
199 | copy_page(smackopts, orig); | ||
200 | return 0; | ||
201 | } | ||
202 | |||
203 | otheropts = (char *)get_zeroed_page(GFP_KERNEL); | 196 | otheropts = (char *)get_zeroed_page(GFP_KERNEL); |
204 | if (otheropts == NULL) | 197 | if (otheropts == NULL) |
205 | return -ENOMEM; | 198 | return -ENOMEM; |