Diffstat (limited to 'include'): 170 files changed, 2103 insertions, 5400 deletions
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 302201f1a097..3f88715e811e 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
| @@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x) | |||
| 261 | 261 | ||
| 262 | static inline unsigned long ffz(unsigned long word) | 262 | static inline unsigned long ffz(unsigned long word) |
| 263 | { | 263 | { |
| 264 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 264 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 265 | /* Whee. EV67 can calculate it directly. */ | 265 | /* Whee. EV67 can calculate it directly. */ |
| 266 | return __kernel_cttz(~word); | 266 | return __kernel_cttz(~word); |
| 267 | #else | 267 | #else |
| @@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word) | |||
| 281 | */ | 281 | */ |
| 282 | static inline unsigned long __ffs(unsigned long word) | 282 | static inline unsigned long __ffs(unsigned long word) |
| 283 | { | 283 | { |
| 284 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 284 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 285 | /* Whee. EV67 can calculate it directly. */ | 285 | /* Whee. EV67 can calculate it directly. */ |
| 286 | return __kernel_cttz(word); | 286 | return __kernel_cttz(word); |
| 287 | #else | 287 | #else |
| @@ -313,20 +313,20 @@ static inline int ffs(int word) | |||
| 313 | /* | 313 | /* |
| 314 | * fls: find last bit set. | 314 | * fls: find last bit set. |
| 315 | */ | 315 | */ |
| 316 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 316 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 317 | static inline int fls(int word) | 317 | static inline int fls(int word) |
| 318 | { | 318 | { |
| 319 | return 64 - __kernel_ctlz(word & 0xffffffff); | 319 | return 64 - __kernel_ctlz(word & 0xffffffff); |
| 320 | } | 320 | } |
| 321 | #else | 321 | #else |
| 322 | #define fls generic_fls | 322 | #include <asm-generic/bitops/fls.h> |
| 323 | #endif | 323 | #endif |
| 324 | #define fls64 generic_fls64 | 324 | #include <asm-generic/bitops/fls64.h> |
| 325 | 325 | ||
| 326 | /* Compute powers of two for the given integer. */ | 326 | /* Compute powers of two for the given integer. */ |
| 327 | static inline long floor_log2(unsigned long word) | 327 | static inline long floor_log2(unsigned long word) |
| 328 | { | 328 | { |
| 329 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 329 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 330 | return 63 - __kernel_ctlz(word); | 330 | return 63 - __kernel_ctlz(word); |
| 331 | #else | 331 | #else |
| 332 | long bit; | 332 | long bit; |
| @@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word) | |||
| 347 | * of bits set) of a N-bit word | 347 | * of bits set) of a N-bit word |
| 348 | */ | 348 | */ |
| 349 | 349 | ||
| 350 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 350 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
| 351 | /* Whee. EV67 can calculate it directly. */ | 351 | /* Whee. EV67 can calculate it directly. */ |
| 352 | static inline unsigned long hweight64(unsigned long w) | 352 | static inline unsigned long hweight64(unsigned long w) |
| 353 | { | 353 | { |
| @@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w) | |||
| 358 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) | 358 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) |
| 359 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) | 359 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) |
| 360 | #else | 360 | #else |
| 361 | static inline unsigned long hweight64(unsigned long w) | 361 | #include <asm-generic/bitops/hweight.h> |
| 362 | { | ||
| 363 | unsigned long result; | ||
| 364 | for (result = 0; w ; w >>= 1) | ||
| 365 | result += (w & 1); | ||
| 366 | return result; | ||
| 367 | } | ||
| 368 | |||
| 369 | #define hweight32(x) generic_hweight32(x) | ||
| 370 | #define hweight16(x) generic_hweight16(x) | ||
| 371 | #define hweight8(x) generic_hweight8(x) | ||
| 372 | #endif | 362 | #endif |
| 373 | 363 | ||
| 374 | #endif /* __KERNEL__ */ | 364 | #endif /* __KERNEL__ */ |
| 375 | 365 | ||
| 376 | /* | 366 | #include <asm-generic/bitops/find.h> |
| 377 | * Find next zero bit in a bitmap reasonably efficiently.. | ||
| 378 | */ | ||
| 379 | static inline unsigned long | ||
| 380 | find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset) | ||
| 381 | { | ||
| 382 | const unsigned long *p = addr; | ||
| 383 | unsigned long result = offset & ~63UL; | ||
| 384 | unsigned long tmp; | ||
| 385 | |||
| 386 | p += offset >> 6; | ||
| 387 | if (offset >= size) | ||
| 388 | return size; | ||
| 389 | size -= result; | ||
| 390 | offset &= 63UL; | ||
| 391 | if (offset) { | ||
| 392 | tmp = *(p++); | ||
| 393 | tmp |= ~0UL >> (64-offset); | ||
| 394 | if (size < 64) | ||
| 395 | goto found_first; | ||
| 396 | if (~tmp) | ||
| 397 | goto found_middle; | ||
| 398 | size -= 64; | ||
| 399 | result += 64; | ||
| 400 | } | ||
| 401 | while (size & ~63UL) { | ||
| 402 | if (~(tmp = *(p++))) | ||
| 403 | goto found_middle; | ||
| 404 | result += 64; | ||
| 405 | size -= 64; | ||
| 406 | } | ||
| 407 | if (!size) | ||
| 408 | return result; | ||
| 409 | tmp = *p; | ||
| 410 | found_first: | ||
| 411 | tmp |= ~0UL << size; | ||
| 412 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 413 | return result + size; /* Nope. */ | ||
| 414 | found_middle: | ||
| 415 | return result + ffz(tmp); | ||
| 416 | } | ||
| 417 | |||
| 418 | /* | ||
| 419 | * Find next one bit in a bitmap reasonably efficiently. | ||
| 420 | */ | ||
| 421 | static inline unsigned long | ||
| 422 | find_next_bit(const void * addr, unsigned long size, unsigned long offset) | ||
| 423 | { | ||
| 424 | const unsigned long *p = addr; | ||
| 425 | unsigned long result = offset & ~63UL; | ||
| 426 | unsigned long tmp; | ||
| 427 | |||
| 428 | p += offset >> 6; | ||
| 429 | if (offset >= size) | ||
| 430 | return size; | ||
| 431 | size -= result; | ||
| 432 | offset &= 63UL; | ||
| 433 | if (offset) { | ||
| 434 | tmp = *(p++); | ||
| 435 | tmp &= ~0UL << offset; | ||
| 436 | if (size < 64) | ||
| 437 | goto found_first; | ||
| 438 | if (tmp) | ||
| 439 | goto found_middle; | ||
| 440 | size -= 64; | ||
| 441 | result += 64; | ||
| 442 | } | ||
| 443 | while (size & ~63UL) { | ||
| 444 | if ((tmp = *(p++))) | ||
| 445 | goto found_middle; | ||
| 446 | result += 64; | ||
| 447 | size -= 64; | ||
| 448 | } | ||
| 449 | if (!size) | ||
| 450 | return result; | ||
| 451 | tmp = *p; | ||
| 452 | found_first: | ||
| 453 | tmp &= ~0UL >> (64 - size); | ||
| 454 | if (!tmp) | ||
| 455 | return result + size; | ||
| 456 | found_middle: | ||
| 457 | return result + __ffs(tmp); | ||
| 458 | } | ||
| 459 | |||
| 460 | /* | ||
| 461 | * The optimizer actually does good code for this case. | ||
| 462 | */ | ||
| 463 | #define find_first_zero_bit(addr, size) \ | ||
| 464 | find_next_zero_bit((addr), (size), 0) | ||
| 465 | #define find_first_bit(addr, size) \ | ||
| 466 | find_next_bit((addr), (size), 0) | ||
| 467 | 367 | ||
| 468 | #ifdef __KERNEL__ | 368 | #ifdef __KERNEL__ |
| 469 | 369 | ||
| @@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3]) | |||
| 487 | return __ffs(b0) + ofs; | 387 | return __ffs(b0) + ofs; |
| 488 | } | 388 | } |
| 489 | 389 | ||
| 390 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
| 490 | 391 | ||
| 491 | #define ext2_set_bit __test_and_set_bit | ||
| 492 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 392 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 493 | #define ext2_clear_bit __test_and_clear_bit | ||
| 494 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 393 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 495 | #define ext2_test_bit test_bit | 394 | |
| 496 | #define ext2_find_first_zero_bit find_first_zero_bit | 395 | #include <asm-generic/bitops/minix.h> |
| 497 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
| 498 | |||
| 499 | /* Bitmap functions for the minix filesystem. */ | ||
| 500 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) | ||
| 501 | #define minix_set_bit(nr,addr) __set_bit(nr,addr) | ||
| 502 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) | ||
| 503 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 504 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 505 | 396 | ||
| 506 | #endif /* __KERNEL__ */ | 397 | #endif /* __KERNEL__ */ |
| 507 | 398 | ||
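
For context on what the deleted open-coded helpers did: the hweight64() loop removed above is simply a software population count, which is also what the generic fallback provides when the CPU has no count instruction. The snippet below is only an illustrative sketch of that idea in plain C; it is not the contents of include/asm-generic/bitops/hweight.h, and the helper name is hypothetical.

/* Illustrative sketch only: a portable software population count,
 * equivalent in effect to the hweight64() loop this patch removes.
 * The real fallback lives in include/asm-generic/bitops/hweight.h. */
static inline unsigned long soft_hweight64(unsigned long w)
{
        unsigned long count = 0;

        while (w) {
                w &= w - 1;     /* clear the lowest set bit */
                count++;
        }
        return count;
}
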
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h
index c203fc2fa5cd..ecb17a72acc3 100644
--- a/include/asm-alpha/fpu.h
+++ b/include/asm-alpha/fpu.h
| @@ -130,7 +130,7 @@ rdfpcr(void) | |||
| 130 | { | 130 | { |
| 131 | unsigned long tmp, ret; | 131 | unsigned long tmp, ret; |
| 132 | 132 | ||
| 133 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 133 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
| 134 | __asm__ __volatile__ ( | 134 | __asm__ __volatile__ ( |
| 135 | "ftoit $f0,%0\n\t" | 135 | "ftoit $f0,%0\n\t" |
| 136 | "mf_fpcr $f0\n\t" | 136 | "mf_fpcr $f0\n\t" |
| @@ -154,7 +154,7 @@ wrfpcr(unsigned long val) | |||
| 154 | { | 154 | { |
| 155 | unsigned long tmp; | 155 | unsigned long tmp; |
| 156 | 156 | ||
| 157 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 157 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
| 158 | __asm__ __volatile__ ( | 158 | __asm__ __volatile__ ( |
| 159 | "ftoit $f0,%0\n\t" | 159 | "ftoit $f0,%0\n\t" |
| 160 | "itoft %1,$f0\n\t" | 160 | "itoft %1,$f0\n\t" |
diff --git a/include/asm-alpha/mmzone.h b/include/asm-alpha/mmzone.h
index a011ef4cf3d3..192d80c875b0 100644
--- a/include/asm-alpha/mmzone.h
+++ b/include/asm-alpha/mmzone.h
| @@ -59,9 +59,6 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | |||
| 59 | #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) | 59 | #define kvaddr_to_nid(kaddr) pa_to_nid(__pa(kaddr)) |
| 60 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 60 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
| 61 | 61 | ||
| 62 | #define local_mapnr(kvaddr) \ | ||
| 63 | ((__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr))) | ||
| 64 | |||
| 65 | /* | 62 | /* |
| 66 | * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory | 63 | * Given a kaddr, LOCAL_BASE_ADDR finds the owning node of the memory |
| 67 | * and returns the kaddr corresponding to first physical page in the | 64 | * and returns the kaddr corresponding to first physical page in the |
| @@ -86,8 +83,7 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | |||
| 86 | pte_t pte; \ | 83 | pte_t pte; \ |
| 87 | unsigned long pfn; \ | 84 | unsigned long pfn; \ |
| 88 | \ | 85 | \ |
| 89 | pfn = ((unsigned long)((page)-page_zone(page)->zone_mem_map)) << 32; \ | 86 | pfn = page_to_pfn(page) << 32; \ |
| 90 | pfn += page_zone(page)->zone_start_pfn << 32; \ | ||
| 91 | pte_val(pte) = pfn | pgprot_val(pgprot); \ | 87 | pte_val(pte) = pfn | pgprot_val(pgprot); \ |
| 92 | \ | 88 | \ |
| 93 | pte; \ | 89 | pte; \ |
| @@ -104,19 +100,8 @@ PLAT_NODE_DATA_LOCALNR(unsigned long p, int n) | |||
| 104 | __xx; \ | 100 | __xx; \ |
| 105 | }) | 101 | }) |
| 106 | 102 | ||
| 107 | #define pfn_to_page(pfn) \ | ||
| 108 | ({ \ | ||
| 109 | unsigned long kaddr = (unsigned long)__va((pfn) << PAGE_SHIFT); \ | ||
| 110 | (NODE_DATA(kvaddr_to_nid(kaddr))->node_mem_map + local_mapnr(kaddr)); \ | ||
| 111 | }) | ||
| 112 | |||
| 113 | #define page_to_pfn(page) \ | ||
| 114 | ((page) - page_zone(page)->zone_mem_map + \ | ||
| 115 | (page_zone(page)->zone_start_pfn)) | ||
| 116 | |||
| 117 | #define page_to_pa(page) \ | 103 | #define page_to_pa(page) \ |
| 118 | ((( (page) - page_zone(page)->zone_mem_map ) \ | 104 | (page_to_pfn(page) << PAGE_SHIFT) |
| 119 | + page_zone(page)->zone_start_pfn) << PAGE_SHIFT) | ||
| 120 | 105 | ||
| 121 | #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) | 106 | #define pfn_to_nid(pfn) pa_to_nid(((u64)(pfn) << PAGE_SHIFT)) |
| 122 | #define pfn_valid(pfn) \ | 107 | #define pfn_valid(pfn) \ |
diff --git a/include/asm-alpha/page.h b/include/asm-alpha/page.h
index fa0b41b164a7..61bcf70b5eac 100644
--- a/include/asm-alpha/page.h
+++ b/include/asm-alpha/page.h
| @@ -85,8 +85,6 @@ typedef unsigned long pgprot_t; | |||
| 85 | #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) | 85 | #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) |
| 86 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) | 86 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) |
| 87 | #ifndef CONFIG_DISCONTIGMEM | 87 | #ifndef CONFIG_DISCONTIGMEM |
| 88 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 89 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 90 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 88 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 91 | 89 | ||
| 92 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 90 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| @@ -95,9 +93,9 @@ typedef unsigned long pgprot_t; | |||
| 95 | 93 | ||
| 96 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ | 94 | #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ |
| 97 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 95 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
| 98 | |||
| 99 | #endif /* __KERNEL__ */ | 96 | #endif /* __KERNEL__ */ |
| 100 | 97 | ||
| 98 | #include <asm-generic/memory_model.h> | ||
| 101 | #include <asm-generic/page.h> | 99 | #include <asm-generic/page.h> |
| 102 | 100 | ||
| 103 | #endif /* _ALPHA_PAGE_H */ | 101 | #endif /* _ALPHA_PAGE_H */ |
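
The same conversion repeats in the other page.h/memory.h hunks below: each architecture stops open-coding page_to_pfn()/pfn_to_page(), defines ARCH_PFN_OFFSET where its PFNs do not start at zero, and includes asm-generic/memory_model.h. As a rough sketch of what that centralized flat-memory mapping amounts to (an assumption for illustration, not the verbatim generic header):

/* Sketch of the flat (non-discontig) pfn <-> struct page mapping that
 * asm-generic/memory_model.h provides once ARCH_PFN_OFFSET is defined;
 * illustrative only, not copied from the kernel header. */
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET         0UL
#endif

#define __pfn_to_page(pfn)      (mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)
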
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h
index d02de721ecc1..0ac54b1a8bad 100644
--- a/include/asm-arm/bitops.h
+++ b/include/asm-arm/bitops.h
| @@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | |||
| 117 | return res & mask; | 117 | return res & mask; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /* | 120 | #include <asm-generic/bitops/non-atomic.h> |
| 121 | * Now the non-atomic variants. We let the compiler handle all | ||
| 122 | * optimisations for these. These are all _native_ endian. | ||
| 123 | */ | ||
| 124 | static inline void __set_bit(int nr, volatile unsigned long *p) | ||
| 125 | { | ||
| 126 | p[nr >> 5] |= (1UL << (nr & 31)); | ||
| 127 | } | ||
| 128 | |||
| 129 | static inline void __clear_bit(int nr, volatile unsigned long *p) | ||
| 130 | { | ||
| 131 | p[nr >> 5] &= ~(1UL << (nr & 31)); | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline void __change_bit(int nr, volatile unsigned long *p) | ||
| 135 | { | ||
| 136 | p[nr >> 5] ^= (1UL << (nr & 31)); | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline int __test_and_set_bit(int nr, volatile unsigned long *p) | ||
| 140 | { | ||
| 141 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 142 | |||
| 143 | p += nr >> 5; | ||
| 144 | |||
| 145 | oldval = *p; | ||
| 146 | *p = oldval | mask; | ||
| 147 | return oldval & mask; | ||
| 148 | } | ||
| 149 | |||
| 150 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) | ||
| 151 | { | ||
| 152 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 153 | |||
| 154 | p += nr >> 5; | ||
| 155 | |||
| 156 | oldval = *p; | ||
| 157 | *p = oldval & ~mask; | ||
| 158 | return oldval & mask; | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline int __test_and_change_bit(int nr, volatile unsigned long *p) | ||
| 162 | { | ||
| 163 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 164 | |||
| 165 | p += nr >> 5; | ||
| 166 | |||
| 167 | oldval = *p; | ||
| 168 | *p = oldval ^ mask; | ||
| 169 | return oldval & mask; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* | ||
| 173 | * This routine doesn't need to be atomic. | ||
| 174 | */ | ||
| 175 | static inline int __test_bit(int nr, const volatile unsigned long * p) | ||
| 176 | { | ||
| 177 | return (p[nr >> 5] >> (nr & 31)) & 1UL; | ||
| 178 | } | ||
| 179 | 121 | ||
| 180 | /* | 122 | /* |
| 181 | * A note about Endian-ness. | 123 | * A note about Endian-ness. |
| @@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
| 261 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | 203 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) |
| 262 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | 204 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) |
| 263 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | 205 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) |
| 264 | #define test_bit(nr,p) __test_bit(nr,p) | ||
| 265 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 206 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
| 266 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 207 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
| 267 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 208 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
| @@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
| 280 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) | 221 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) |
| 281 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) | 222 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) |
| 282 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) | 223 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) |
| 283 | #define test_bit(nr,p) __test_bit(nr,p) | ||
| 284 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) | 224 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) |
| 285 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) | 225 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) |
| 286 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 226 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
| @@ -292,57 +232,41 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
| 292 | 232 | ||
| 293 | #if __LINUX_ARM_ARCH__ < 5 | 233 | #if __LINUX_ARM_ARCH__ < 5 |
| 294 | 234 | ||
| 295 | /* | 235 | #include <asm-generic/bitops/ffz.h> |
| 296 | * ffz = Find First Zero in word. Undefined if no zero exists, | 236 | #include <asm-generic/bitops/__ffs.h> |
| 297 | * so code should check against ~0UL first.. | 237 | #include <asm-generic/bitops/fls.h> |
| 298 | */ | 238 | #include <asm-generic/bitops/ffs.h> |
| 299 | static inline unsigned long ffz(unsigned long word) | ||
| 300 | { | ||
| 301 | int k; | ||
| 302 | |||
| 303 | word = ~word; | ||
| 304 | k = 31; | ||
| 305 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
| 306 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
| 307 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
| 308 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
| 309 | if (word & 0x40000000) { k -= 1; } | ||
| 310 | return k; | ||
| 311 | } | ||
| 312 | |||
| 313 | /* | ||
| 314 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
| 315 | * so code should check against ~0UL first.. | ||
| 316 | */ | ||
| 317 | static inline unsigned long __ffs(unsigned long word) | ||
| 318 | { | ||
| 319 | int k; | ||
| 320 | |||
| 321 | k = 31; | ||
| 322 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
| 323 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
| 324 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
| 325 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
| 326 | if (word & 0x40000000) { k -= 1; } | ||
| 327 | return k; | ||
| 328 | } | ||
| 329 | |||
| 330 | /* | ||
| 331 | * fls: find last bit set. | ||
| 332 | */ | ||
| 333 | 239 | ||
| 334 | #define fls(x) generic_fls(x) | 240 | #else |
| 335 | #define fls64(x) generic_fls64(x) | ||
| 336 | |||
| 337 | /* | ||
| 338 | * ffs: find first bit set. This is defined the same way as | ||
| 339 | * the libc and compiler builtin ffs routines, therefore | ||
| 340 | * differs in spirit from the above ffz (man ffs). | ||
| 341 | */ | ||
| 342 | 241 | ||
| 343 | #define ffs(x) generic_ffs(x) | 242 | static inline int constant_fls(int x) |
| 243 | { | ||
| 244 | int r = 32; | ||
| 344 | 245 | ||
| 345 | #else | 246 | if (!x) |
| 247 | return 0; | ||
| 248 | if (!(x & 0xffff0000u)) { | ||
| 249 | x <<= 16; | ||
| 250 | r -= 16; | ||
| 251 | } | ||
| 252 | if (!(x & 0xff000000u)) { | ||
| 253 | x <<= 8; | ||
| 254 | r -= 8; | ||
| 255 | } | ||
| 256 | if (!(x & 0xf0000000u)) { | ||
| 257 | x <<= 4; | ||
| 258 | r -= 4; | ||
| 259 | } | ||
| 260 | if (!(x & 0xc0000000u)) { | ||
| 261 | x <<= 2; | ||
| 262 | r -= 2; | ||
| 263 | } | ||
| 264 | if (!(x & 0x80000000u)) { | ||
| 265 | x <<= 1; | ||
| 266 | r -= 1; | ||
| 267 | } | ||
| 268 | return r; | ||
| 269 | } | ||
| 346 | 270 | ||
| 347 | /* | 271 | /* |
| 348 | * On ARMv5 and above those functions can be implemented around | 272 | * On ARMv5 and above those functions can be implemented around |
| @@ -350,39 +274,18 @@ static inline unsigned long __ffs(unsigned long word) | |||
| 350 | */ | 274 | */ |
| 351 | 275 | ||
| 352 | #define fls(x) \ | 276 | #define fls(x) \ |
| 353 | ( __builtin_constant_p(x) ? generic_fls(x) : \ | 277 | ( __builtin_constant_p(x) ? constant_fls(x) : \ |
| 354 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) | 278 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) |
| 355 | #define fls64(x) generic_fls64(x) | ||
| 356 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) | 279 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) |
| 357 | #define __ffs(x) (ffs(x) - 1) | 280 | #define __ffs(x) (ffs(x) - 1) |
| 358 | #define ffz(x) __ffs( ~(x) ) | 281 | #define ffz(x) __ffs( ~(x) ) |
| 359 | 282 | ||
| 360 | #endif | 283 | #endif |
| 361 | 284 | ||
| 362 | /* | 285 | #include <asm-generic/bitops/fls64.h> |
| 363 | * Find first bit set in a 168-bit bitmap, where the first | ||
| 364 | * 128 bits are unlikely to be set. | ||
| 365 | */ | ||
| 366 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 367 | { | ||
| 368 | unsigned long v; | ||
| 369 | unsigned int off; | ||
| 370 | |||
| 371 | for (off = 0; v = b[off], off < 4; off++) { | ||
| 372 | if (unlikely(v)) | ||
| 373 | break; | ||
| 374 | } | ||
| 375 | return __ffs(v) + off * 32; | ||
| 376 | } | ||
| 377 | |||
| 378 | /* | ||
| 379 | * hweightN: returns the hamming weight (i.e. the number | ||
| 380 | * of bits set) of a N-bit word | ||
| 381 | */ | ||
| 382 | 286 | ||
| 383 | #define hweight32(x) generic_hweight32(x) | 287 | #include <asm-generic/bitops/sched.h> |
| 384 | #define hweight16(x) generic_hweight16(x) | 288 | #include <asm-generic/bitops/hweight.h> |
| 385 | #define hweight8(x) generic_hweight8(x) | ||
| 386 | 289 | ||
| 387 | /* | 290 | /* |
| 388 | * Ext2 is defined to use little-endian byte ordering. | 291 | * Ext2 is defined to use little-endian byte ordering. |
| @@ -397,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b) | |||
| 397 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 300 | #define ext2_clear_bit_atomic(lock,nr,p) \ |
| 398 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 301 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 399 | #define ext2_test_bit(nr,p) \ | 302 | #define ext2_test_bit(nr,p) \ |
| 400 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 303 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 401 | #define ext2_find_first_zero_bit(p,sz) \ | 304 | #define ext2_find_first_zero_bit(p,sz) \ |
| 402 | _find_first_zero_bit_le(p,sz) | 305 | _find_first_zero_bit_le(p,sz) |
| 403 | #define ext2_find_next_zero_bit(p,sz,off) \ | 306 | #define ext2_find_next_zero_bit(p,sz,off) \ |
| @@ -410,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b) | |||
| 410 | #define minix_set_bit(nr,p) \ | 313 | #define minix_set_bit(nr,p) \ |
| 411 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 314 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 412 | #define minix_test_bit(nr,p) \ | 315 | #define minix_test_bit(nr,p) \ |
| 413 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 316 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 414 | #define minix_test_and_set_bit(nr,p) \ | 317 | #define minix_test_and_set_bit(nr,p) \ |
| 415 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 318 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 416 | #define minix_test_and_clear_bit(nr,p) \ | 319 | #define minix_test_and_clear_bit(nr,p) \ |
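
A brief aside on the ARMv5+ branch added above: ffs(), __ffs() and ffz() are all derived from the clz-based fls() by first isolating the lowest set bit with x & -x. In two's complement, -x inverts every bit above the lowest set bit and preserves that bit, so the AND leaves exactly one bit standing. A tiny standalone check of that identity, using hypothetical example values and independent of the kernel headers:

#include <assert.h>

int main(void)
{
        unsigned long x = 0x58;          /* binary 0101 1000, lowest set bit is bit 3 */
        unsigned long lowest = x & -x;   /* isolates bit 3 -> 0x08 */

        assert(lowest == 0x08UL);
        /* fls(0x08) == 4, so ffs(0x58) == 4 and __ffs(0x58) == 3,
         * matching the macros defined in the hunk above. */
        return 0;
}
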
diff --git a/include/asm-arm/memory.h b/include/asm-arm/memory.h
index b4e1146ab682..afa5c3ea077c 100644
--- a/include/asm-arm/memory.h
+++ b/include/asm-arm/memory.h
| @@ -172,9 +172,7 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
| 172 | * virt_addr_valid(k) indicates whether a virtual address is valid | 172 | * virt_addr_valid(k) indicates whether a virtual address is valid |
| 173 | */ | 173 | */ |
| 174 | #ifndef CONFIG_DISCONTIGMEM | 174 | #ifndef CONFIG_DISCONTIGMEM |
| 175 | 175 | #define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET) | |
| 176 | #define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) | ||
| 177 | #define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET) | ||
| 178 | #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) | 176 | #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) |
| 179 | 177 | ||
| 180 | #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) | 178 | #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) |
| @@ -189,13 +187,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
| 189 | * around in memory. | 187 | * around in memory. |
| 190 | */ | 188 | */ |
| 191 | #include <linux/numa.h> | 189 | #include <linux/numa.h> |
| 192 | 190 | #define arch_pfn_to_nid(pfn) (PFN_TO_NID(pfn)) | |
| 193 | #define page_to_pfn(page) \ | 191 | #define arch_local_page_offset(pfn, nid) (LOCAL_MAP_NR((pfn) << PAGE_OFFSET)) |
| 194 | (( (page) - page_zone(page)->zone_mem_map) \ | ||
| 195 | + page_zone(page)->zone_start_pfn) | ||
| 196 | |||
| 197 | #define pfn_to_page(pfn) \ | ||
| 198 | (PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT)) | ||
| 199 | 192 | ||
| 200 | #define pfn_valid(pfn) \ | 193 | #define pfn_valid(pfn) \ |
| 201 | ({ \ | 194 | ({ \ |
| @@ -243,4 +236,6 @@ static inline __deprecated void *bus_to_virt(unsigned long x) | |||
| 243 | 236 | ||
| 244 | #endif | 237 | #endif |
| 245 | 238 | ||
| 239 | #include <asm-generic/memory_model.h> | ||
| 240 | |||
| 246 | #endif | 241 | #endif |
diff --git a/include/asm-arm/rtc.h b/include/asm-arm/rtc.h
index 370dfe77589d..1a5c9232a91e 100644
--- a/include/asm-arm/rtc.h
+++ b/include/asm-arm/rtc.h
| @@ -25,9 +25,6 @@ struct rtc_ops { | |||
| 25 | int (*proc)(char *buf); | 25 | int (*proc)(char *buf); |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
| 28 | void rtc_time_to_tm(unsigned long, struct rtc_time *); | ||
| 29 | int rtc_tm_to_time(struct rtc_time *, unsigned long *); | ||
| 30 | int rtc_valid_tm(struct rtc_time *); | ||
| 31 | void rtc_next_alarm_time(struct rtc_time *, struct rtc_time *, struct rtc_time *); | 28 | void rtc_next_alarm_time(struct rtc_time *, struct rtc_time *, struct rtc_time *); |
| 32 | void rtc_update(unsigned long, unsigned long); | 29 | void rtc_update(unsigned long, unsigned long); |
| 33 | int register_rtc(struct rtc_ops *); | 30 | int register_rtc(struct rtc_ops *); |
diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h
index d87f8634e625..19a69573a654 100644
--- a/include/asm-arm26/bitops.h
+++ b/include/asm-arm26/bitops.h
| @@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | |||
| 117 | return res & mask; | 117 | return res & mask; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | /* | 120 | #include <asm-generic/bitops/non-atomic.h> |
| 121 | * Now the non-atomic variants. We let the compiler handle all | ||
| 122 | * optimisations for these. These are all _native_ endian. | ||
| 123 | */ | ||
| 124 | static inline void __set_bit(int nr, volatile unsigned long *p) | ||
| 125 | { | ||
| 126 | p[nr >> 5] |= (1UL << (nr & 31)); | ||
| 127 | } | ||
| 128 | |||
| 129 | static inline void __clear_bit(int nr, volatile unsigned long *p) | ||
| 130 | { | ||
| 131 | p[nr >> 5] &= ~(1UL << (nr & 31)); | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline void __change_bit(int nr, volatile unsigned long *p) | ||
| 135 | { | ||
| 136 | p[nr >> 5] ^= (1UL << (nr & 31)); | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline int __test_and_set_bit(int nr, volatile unsigned long *p) | ||
| 140 | { | ||
| 141 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 142 | |||
| 143 | p += nr >> 5; | ||
| 144 | |||
| 145 | oldval = *p; | ||
| 146 | *p = oldval | mask; | ||
| 147 | return oldval & mask; | ||
| 148 | } | ||
| 149 | |||
| 150 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) | ||
| 151 | { | ||
| 152 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 153 | |||
| 154 | p += nr >> 5; | ||
| 155 | |||
| 156 | oldval = *p; | ||
| 157 | *p = oldval & ~mask; | ||
| 158 | return oldval & mask; | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline int __test_and_change_bit(int nr, volatile unsigned long *p) | ||
| 162 | { | ||
| 163 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
| 164 | |||
| 165 | p += nr >> 5; | ||
| 166 | |||
| 167 | oldval = *p; | ||
| 168 | *p = oldval ^ mask; | ||
| 169 | return oldval & mask; | ||
| 170 | } | ||
| 171 | |||
| 172 | /* | ||
| 173 | * This routine doesn't need to be atomic. | ||
| 174 | */ | ||
| 175 | static inline int __test_bit(int nr, const volatile unsigned long * p) | ||
| 176 | { | ||
| 177 | return (p[nr >> 5] >> (nr & 31)) & 1UL; | ||
| 178 | } | ||
| 179 | 121 | ||
| 180 | /* | 122 | /* |
| 181 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. | 123 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. |
| @@ -211,7 +153,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | |||
| 211 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | 153 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) |
| 212 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | 154 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) |
| 213 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | 155 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) |
| 214 | #define test_bit(nr,p) __test_bit(nr,p) | ||
| 215 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 156 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
| 216 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 157 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
| 217 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 158 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
| @@ -219,80 +160,13 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | |||
| 219 | 160 | ||
| 220 | #define WORD_BITOFF_TO_LE(x) ((x)) | 161 | #define WORD_BITOFF_TO_LE(x) ((x)) |
| 221 | 162 | ||
| 222 | /* | 163 | #include <asm-generic/bitops/ffz.h> |
| 223 | * ffz = Find First Zero in word. Undefined if no zero exists, | 164 | #include <asm-generic/bitops/__ffs.h> |
| 224 | * so code should check against ~0UL first.. | 165 | #include <asm-generic/bitops/fls.h> |
| 225 | */ | 166 | #include <asm-generic/bitops/fls64.h> |
| 226 | static inline unsigned long ffz(unsigned long word) | 167 | #include <asm-generic/bitops/ffs.h> |
| 227 | { | 168 | #include <asm-generic/bitops/sched.h> |
| 228 | int k; | 169 | #include <asm-generic/bitops/hweight.h> |
| 229 | |||
| 230 | word = ~word; | ||
| 231 | k = 31; | ||
| 232 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
| 233 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
| 234 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
| 235 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
| 236 | if (word & 0x40000000) { k -= 1; } | ||
| 237 | return k; | ||
| 238 | } | ||
| 239 | |||
| 240 | /* | ||
| 241 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
| 242 | * so code should check against ~0UL first.. | ||
| 243 | */ | ||
| 244 | static inline unsigned long __ffs(unsigned long word) | ||
| 245 | { | ||
| 246 | int k; | ||
| 247 | |||
| 248 | k = 31; | ||
| 249 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
| 250 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
| 251 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
| 252 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
| 253 | if (word & 0x40000000) { k -= 1; } | ||
| 254 | return k; | ||
| 255 | } | ||
| 256 | |||
| 257 | /* | ||
| 258 | * fls: find last bit set. | ||
| 259 | */ | ||
| 260 | |||
| 261 | #define fls(x) generic_fls(x) | ||
| 262 | #define fls64(x) generic_fls64(x) | ||
| 263 | |||
| 264 | /* | ||
| 265 | * ffs: find first bit set. This is defined the same way as | ||
| 266 | * the libc and compiler builtin ffs routines, therefore | ||
| 267 | * differs in spirit from the above ffz (man ffs). | ||
| 268 | */ | ||
| 269 | |||
| 270 | #define ffs(x) generic_ffs(x) | ||
| 271 | |||
| 272 | /* | ||
| 273 | * Find first bit set in a 168-bit bitmap, where the first | ||
| 274 | * 128 bits are unlikely to be set. | ||
| 275 | */ | ||
| 276 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 277 | { | ||
| 278 | unsigned long v; | ||
| 279 | unsigned int off; | ||
| 280 | |||
| 281 | for (off = 0; v = b[off], off < 4; off++) { | ||
| 282 | if (unlikely(v)) | ||
| 283 | break; | ||
| 284 | } | ||
| 285 | return __ffs(v) + off * 32; | ||
| 286 | } | ||
| 287 | |||
| 288 | /* | ||
| 289 | * hweightN: returns the hamming weight (i.e. the number | ||
| 290 | * of bits set) of a N-bit word | ||
| 291 | */ | ||
| 292 | |||
| 293 | #define hweight32(x) generic_hweight32(x) | ||
| 294 | #define hweight16(x) generic_hweight16(x) | ||
| 295 | #define hweight8(x) generic_hweight8(x) | ||
| 296 | 170 | ||
| 297 | /* | 171 | /* |
| 298 | * Ext2 is defined to use little-endian byte ordering. | 172 | * Ext2 is defined to use little-endian byte ordering. |
| @@ -307,7 +181,7 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
| 307 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 181 | #define ext2_clear_bit_atomic(lock,nr,p) \ |
| 308 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 182 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 309 | #define ext2_test_bit(nr,p) \ | 183 | #define ext2_test_bit(nr,p) \ |
| 310 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 184 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 311 | #define ext2_find_first_zero_bit(p,sz) \ | 185 | #define ext2_find_first_zero_bit(p,sz) \ |
| 312 | _find_first_zero_bit_le(p,sz) | 186 | _find_first_zero_bit_le(p,sz) |
| 313 | #define ext2_find_next_zero_bit(p,sz,off) \ | 187 | #define ext2_find_next_zero_bit(p,sz,off) \ |
| @@ -320,7 +194,7 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
| 320 | #define minix_set_bit(nr,p) \ | 194 | #define minix_set_bit(nr,p) \ |
| 321 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 195 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 322 | #define minix_test_bit(nr,p) \ | 196 | #define minix_test_bit(nr,p) \ |
| 323 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 197 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 324 | #define minix_test_and_set_bit(nr,p) \ | 198 | #define minix_test_and_set_bit(nr,p) \ |
| 325 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 199 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
| 326 | #define minix_test_and_clear_bit(nr,p) \ | 200 | #define minix_test_and_clear_bit(nr,p) \ |
diff --git a/include/asm-arm26/memory.h b/include/asm-arm26/memory.h
index 20d78616f650..a65f10b80dfb 100644
--- a/include/asm-arm26/memory.h
+++ b/include/asm-arm26/memory.h
| @@ -81,8 +81,7 @@ static inline void *phys_to_virt(unsigned long x) | |||
| 81 | * virt_to_page(k) convert a _valid_ virtual address to struct page * | 81 | * virt_to_page(k) convert a _valid_ virtual address to struct page * |
| 82 | * virt_addr_valid(k) indicates whether a virtual address is valid | 82 | * virt_addr_valid(k) indicates whether a virtual address is valid |
| 83 | */ | 83 | */ |
| 84 | #define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET) | 84 | #define ARCH_PFN_OFFSET (PHYS_PFN_OFFSET) |
| 85 | #define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET) | ||
| 86 | #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) | 85 | #define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr)) |
| 87 | 86 | ||
| 88 | #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) | 87 | #define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)) |
| @@ -98,4 +97,5 @@ static inline void *phys_to_virt(unsigned long x) | |||
| 98 | */ | 97 | */ |
| 99 | #define page_to_bus(page) (page_address(page)) | 98 | #define page_to_bus(page) (page_address(page)) |
| 100 | 99 | ||
| 100 | #include <asm-generic/memory_model.h> | ||
| 101 | #endif | 101 | #endif |
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h
index b7fef1572dc0..a569065113d9 100644
--- a/include/asm-cris/bitops.h
+++ b/include/asm-cris/bitops.h
| @@ -39,8 +39,6 @@ struct __dummy { unsigned long a[100]; }; | |||
| 39 | 39 | ||
| 40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) | 40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) |
| 41 | 41 | ||
| 42 | #define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr) | ||
| 43 | |||
| 44 | /* | 42 | /* |
| 45 | * clear_bit - Clears a bit in memory | 43 | * clear_bit - Clears a bit in memory |
| 46 | * @nr: Bit to clear | 44 | * @nr: Bit to clear |
| @@ -54,8 +52,6 @@ struct __dummy { unsigned long a[100]; }; | |||
| 54 | 52 | ||
| 55 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) | 53 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) |
| 56 | 54 | ||
| 57 | #define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr) | ||
| 58 | |||
| 59 | /* | 55 | /* |
| 60 | * change_bit - Toggle a bit in memory | 56 | * change_bit - Toggle a bit in memory |
| 61 | * @nr: Bit to change | 57 | * @nr: Bit to change |
| @@ -68,18 +64,6 @@ struct __dummy { unsigned long a[100]; }; | |||
| 68 | 64 | ||
| 69 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) | 65 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) |
| 70 | 66 | ||
| 71 | /* | ||
| 72 | * __change_bit - Toggle a bit in memory | ||
| 73 | * @nr: the bit to change | ||
| 74 | * @addr: the address to start counting from | ||
| 75 | * | ||
| 76 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
| 77 | * If it's called on the same region of memory simultaneously, the effect | ||
| 78 | * may be that only one operation succeeds. | ||
| 79 | */ | ||
| 80 | |||
| 81 | #define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr) | ||
| 82 | |||
| 83 | /** | 67 | /** |
| 84 | * test_and_set_bit - Set a bit and return its old value | 68 | * test_and_set_bit - Set a bit and return its old value |
| 85 | * @nr: Bit to set | 69 | * @nr: Bit to set |
| @@ -101,19 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | |||
| 101 | retval = (mask & *adr) != 0; | 85 | retval = (mask & *adr) != 0; |
| 102 | *adr |= mask; | 86 | *adr |= mask; |
| 103 | cris_atomic_restore(addr, flags); | 87 | cris_atomic_restore(addr, flags); |
| 104 | local_irq_restore(flags); | ||
| 105 | return retval; | ||
| 106 | } | ||
| 107 | |||
| 108 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
| 109 | { | ||
| 110 | unsigned int mask, retval; | ||
| 111 | unsigned int *adr = (unsigned int *)addr; | ||
| 112 | |||
| 113 | adr += nr >> 5; | ||
| 114 | mask = 1 << (nr & 0x1f); | ||
| 115 | retval = (mask & *adr) != 0; | ||
| 116 | *adr |= mask; | ||
| 117 | return retval; | 88 | return retval; |
| 118 | } | 89 | } |
| 119 | 90 | ||
| @@ -148,27 +119,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
| 148 | } | 119 | } |
| 149 | 120 | ||
| 150 | /** | 121 | /** |
| 151 | * __test_and_clear_bit - Clear a bit and return its old value | ||
| 152 | * @nr: Bit to clear | ||
| 153 | * @addr: Address to count from | ||
| 154 | * | ||
| 155 | * This operation is non-atomic and can be reordered. | ||
| 156 | * If two examples of this operation race, one can appear to succeed | ||
| 157 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 158 | */ | ||
| 159 | |||
| 160 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
| 161 | { | ||
| 162 | unsigned int mask, retval; | ||
| 163 | unsigned int *adr = (unsigned int *)addr; | ||
| 164 | |||
| 165 | adr += nr >> 5; | ||
| 166 | mask = 1 << (nr & 0x1f); | ||
| 167 | retval = (mask & *adr) != 0; | ||
| 168 | *adr &= ~mask; | ||
| 169 | return retval; | ||
| 170 | } | ||
| 171 | /** | ||
| 172 | * test_and_change_bit - Change a bit and return its old value | 122 | * test_and_change_bit - Change a bit and return its old value |
| 173 | * @nr: Bit to change | 123 | * @nr: Bit to change |
| 174 | * @addr: Address to count from | 124 | * @addr: Address to count from |
| @@ -191,42 +141,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
| 191 | return retval; | 141 | return retval; |
| 192 | } | 142 | } |
| 193 | 143 | ||
| 194 | /* WARNING: non atomic and it can be reordered! */ | 144 | #include <asm-generic/bitops/non-atomic.h> |
| 195 | |||
| 196 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
| 197 | { | ||
| 198 | unsigned int mask, retval; | ||
| 199 | unsigned int *adr = (unsigned int *)addr; | ||
| 200 | |||
| 201 | adr += nr >> 5; | ||
| 202 | mask = 1 << (nr & 0x1f); | ||
| 203 | retval = (mask & *adr) != 0; | ||
| 204 | *adr ^= mask; | ||
| 205 | |||
| 206 | return retval; | ||
| 207 | } | ||
| 208 | |||
| 209 | /** | ||
| 210 | * test_bit - Determine whether a bit is set | ||
| 211 | * @nr: bit number to test | ||
| 212 | * @addr: Address to start counting from | ||
| 213 | * | ||
| 214 | * This routine doesn't need to be atomic. | ||
| 215 | */ | ||
| 216 | |||
| 217 | static inline int test_bit(int nr, const volatile unsigned long *addr) | ||
| 218 | { | ||
| 219 | unsigned int mask; | ||
| 220 | unsigned int *adr = (unsigned int *)addr; | ||
| 221 | |||
| 222 | adr += nr >> 5; | ||
| 223 | mask = 1 << (nr & 0x1f); | ||
| 224 | return ((mask & *adr) != 0); | ||
| 225 | } | ||
| 226 | |||
| 227 | /* | ||
| 228 | * Find-bit routines.. | ||
| 229 | */ | ||
| 230 | 145 | ||
| 231 | /* | 146 | /* |
| 232 | * Since we define it "external", it collides with the built-in | 147 | * Since we define it "external", it collides with the built-in |
| @@ -235,152 +150,18 @@ static inline int test_bit(int nr, const volatile unsigned long *addr) | |||
| 235 | */ | 150 | */ |
| 236 | #define ffs kernel_ffs | 151 | #define ffs kernel_ffs |
| 237 | 152 | ||
| 238 | /* | 153 | #include <asm-generic/bitops/fls.h> |
| 239 | * fls: find last bit set. | 154 | #include <asm-generic/bitops/fls64.h> |
| 240 | */ | 155 | #include <asm-generic/bitops/hweight.h> |
| 241 | 156 | #include <asm-generic/bitops/find.h> | |
| 242 | #define fls(x) generic_fls(x) | ||
| 243 | #define fls64(x) generic_fls64(x) | ||
| 244 | |||
| 245 | /* | ||
| 246 | * hweightN - returns the hamming weight of a N-bit word | ||
| 247 | * @x: the word to weigh | ||
| 248 | * | ||
| 249 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 250 | */ | ||
| 251 | 157 | ||
| 252 | #define hweight32(x) generic_hweight32(x) | 158 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 253 | #define hweight16(x) generic_hweight16(x) | ||
| 254 | #define hweight8(x) generic_hweight8(x) | ||
| 255 | 159 | ||
| 256 | /** | ||
| 257 | * find_next_zero_bit - find the first zero bit in a memory region | ||
| 258 | * @addr: The address to base the search on | ||
| 259 | * @offset: The bitnumber to start searching at | ||
| 260 | * @size: The maximum size to search | ||
| 261 | */ | ||
| 262 | static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset) | ||
| 263 | { | ||
| 264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 265 | unsigned long result = offset & ~31UL; | ||
| 266 | unsigned long tmp; | ||
| 267 | |||
| 268 | if (offset >= size) | ||
| 269 | return size; | ||
| 270 | size -= result; | ||
| 271 | offset &= 31UL; | ||
| 272 | if (offset) { | ||
| 273 | tmp = *(p++); | ||
| 274 | tmp |= ~0UL >> (32-offset); | ||
| 275 | if (size < 32) | ||
| 276 | goto found_first; | ||
| 277 | if (~tmp) | ||
| 278 | goto found_middle; | ||
| 279 | size -= 32; | ||
| 280 | result += 32; | ||
| 281 | } | ||
| 282 | while (size & ~31UL) { | ||
| 283 | if (~(tmp = *(p++))) | ||
| 284 | goto found_middle; | ||
| 285 | result += 32; | ||
| 286 | size -= 32; | ||
| 287 | } | ||
| 288 | if (!size) | ||
| 289 | return result; | ||
| 290 | tmp = *p; | ||
| 291 | |||
| 292 | found_first: | ||
| 293 | tmp |= ~0UL << size; | ||
| 294 | found_middle: | ||
| 295 | return result + ffz(tmp); | ||
| 296 | } | ||
| 297 | |||
| 298 | /** | ||
| 299 | * find_next_bit - find the first set bit in a memory region | ||
| 300 | * @addr: The address to base the search on | ||
| 301 | * @offset: The bitnumber to start searching at | ||
| 302 | * @size: The maximum size to search | ||
| 303 | */ | ||
| 304 | static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) | ||
| 305 | { | ||
| 306 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 307 | unsigned long result = offset & ~31UL; | ||
| 308 | unsigned long tmp; | ||
| 309 | |||
| 310 | if (offset >= size) | ||
| 311 | return size; | ||
| 312 | size -= result; | ||
| 313 | offset &= 31UL; | ||
| 314 | if (offset) { | ||
| 315 | tmp = *(p++); | ||
| 316 | tmp &= (~0UL << offset); | ||
| 317 | if (size < 32) | ||
| 318 | goto found_first; | ||
| 319 | if (tmp) | ||
| 320 | goto found_middle; | ||
| 321 | size -= 32; | ||
| 322 | result += 32; | ||
| 323 | } | ||
| 324 | while (size & ~31UL) { | ||
| 325 | if ((tmp = *(p++))) | ||
| 326 | goto found_middle; | ||
| 327 | result += 32; | ||
| 328 | size -= 32; | ||
| 329 | } | ||
| 330 | if (!size) | ||
| 331 | return result; | ||
| 332 | tmp = *p; | ||
| 333 | |||
| 334 | found_first: | ||
| 335 | tmp &= (~0UL >> (32 - size)); | ||
| 336 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 337 | return result + size; /* Nope. */ | ||
| 338 | found_middle: | ||
| 339 | return result + __ffs(tmp); | ||
| 340 | } | ||
| 341 | |||
| 342 | /** | ||
| 343 | * find_first_zero_bit - find the first zero bit in a memory region | ||
| 344 | * @addr: The address to start the search at | ||
| 345 | * @size: The maximum size to search | ||
| 346 | * | ||
| 347 | * Returns the bit-number of the first zero bit, not the number of the byte | ||
| 348 | * containing a bit. | ||
| 349 | */ | ||
| 350 | |||
| 351 | #define find_first_zero_bit(addr, size) \ | ||
| 352 | find_next_zero_bit((addr), (size), 0) | ||
| 353 | #define find_first_bit(addr, size) \ | ||
| 354 | find_next_bit((addr), (size), 0) | ||
| 355 | |||
| 356 | #define ext2_set_bit test_and_set_bit | ||
| 357 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 160 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 358 | #define ext2_clear_bit test_and_clear_bit | ||
| 359 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 161 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 360 | #define ext2_test_bit test_bit | ||
| 361 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
| 362 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
| 363 | |||
| 364 | /* Bitmap functions for the minix filesystem. */ | ||
| 365 | #define minix_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 366 | #define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 367 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 368 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 369 | 162 | ||
| 370 | static inline int sched_find_first_bit(const unsigned long *b) | 163 | #include <asm-generic/bitops/minix.h> |
| 371 | { | 164 | #include <asm-generic/bitops/sched.h> |
| 372 | if (unlikely(b[0])) | ||
| 373 | return __ffs(b[0]); | ||
| 374 | if (unlikely(b[1])) | ||
| 375 | return __ffs(b[1]) + 32; | ||
| 376 | if (unlikely(b[2])) | ||
| 377 | return __ffs(b[2]) + 64; | ||
| 378 | if (unlikely(b[3])) | ||
| 379 | return __ffs(b[3]) + 96; | ||
| 380 | if (b[4]) | ||
| 381 | return __ffs(b[4]) + 128; | ||
| 382 | return __ffs(b[5]) + 32 + 128; | ||
| 383 | } | ||
| 384 | 165 | ||
| 385 | #endif /* __KERNEL__ */ | 166 | #endif /* __KERNEL__ */ |
| 386 | 167 | ||
diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h
index c99c478c482f..3787633e6209 100644
--- a/include/asm-cris/page.h
+++ b/include/asm-cris/page.h
| @@ -43,8 +43,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 43 | 43 | ||
| 44 | /* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */ | 44 | /* On CRIS the PFN numbers doesn't start at 0 so we have to compensate */ |
| 45 | /* for that before indexing into the page table starting at mem_map */ | 45 | /* for that before indexing into the page table starting at mem_map */ |
| 46 | #define pfn_to_page(pfn) (mem_map + ((pfn) - (PAGE_OFFSET >> PAGE_SHIFT))) | 46 | #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) |
| 47 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + (PAGE_OFFSET >> PAGE_SHIFT)) | ||
| 48 | #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) | 47 | #define pfn_valid(pfn) (((pfn) - (PAGE_OFFSET >> PAGE_SHIFT)) < max_mapnr) |
| 49 | 48 | ||
| 50 | /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so | 49 | /* to index into the page map. our pages all start at physical addr PAGE_OFFSET so |
| @@ -77,6 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 77 | 76 | ||
| 78 | #endif /* __KERNEL__ */ | 77 | #endif /* __KERNEL__ */ |
| 79 | 78 | ||
| 79 | #include <asm-generic/memory_model.h> | ||
| 80 | #include <asm-generic/page.h> | 80 | #include <asm-generic/page.h> |
| 81 | 81 | ||
| 82 | #endif /* _CRIS_PAGE_H */ | 82 | #endif /* _CRIS_PAGE_H */ |
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h
index f686b519878e..6344d06390b9 100644
--- a/include/asm-frv/bitops.h
+++ b/include/asm-frv/bitops.h
| @@ -22,20 +22,7 @@ | |||
| 22 | 22 | ||
| 23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 24 | 24 | ||
| 25 | /* | 25 | #include <asm-generic/bitops/ffz.h> |
| 26 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
| 27 | * so code should check against ~0UL first.. | ||
| 28 | */ | ||
| 29 | static inline unsigned long ffz(unsigned long word) | ||
| 30 | { | ||
| 31 | unsigned long result = 0; | ||
| 32 | |||
| 33 | while (word & 1) { | ||
| 34 | result++; | ||
| 35 | word >>= 1; | ||
| 36 | } | ||
| 37 | return result; | ||
| 38 | } | ||
| 39 | 26 | ||
| 40 | /* | 27 | /* |
| 41 | * clear_bit() doesn't provide any barrier for the compiler. | 28 | * clear_bit() doesn't provide any barrier for the compiler. |
| @@ -171,51 +158,9 @@ static inline int __test_bit(int nr, const volatile void * addr) | |||
| 171 | __constant_test_bit((nr),(addr)) : \ | 158 | __constant_test_bit((nr),(addr)) : \ |
| 172 | __test_bit((nr),(addr))) | 159 | __test_bit((nr),(addr))) |
| 173 | 160 | ||
| 174 | extern int find_next_bit(const unsigned long *addr, int size, int offset); | 161 | #include <asm-generic/bitops/ffs.h> |
| 175 | 162 | #include <asm-generic/bitops/__ffs.h> | |
| 176 | #define find_first_bit(addr, size) find_next_bit(addr, size, 0) | 163 | #include <asm-generic/bitops/find.h> |
| 177 | |||
| 178 | #define find_first_zero_bit(addr, size) \ | ||
| 179 | find_next_zero_bit((addr), (size), 0) | ||
| 180 | |||
| 181 | static inline int find_next_zero_bit(const void *addr, int size, int offset) | ||
| 182 | { | ||
| 183 | const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); | ||
| 184 | unsigned long result = offset & ~31UL; | ||
| 185 | unsigned long tmp; | ||
| 186 | |||
| 187 | if (offset >= size) | ||
| 188 | return size; | ||
| 189 | size -= result; | ||
| 190 | offset &= 31UL; | ||
| 191 | if (offset) { | ||
| 192 | tmp = *(p++); | ||
| 193 | tmp |= ~0UL >> (32-offset); | ||
| 194 | if (size < 32) | ||
| 195 | goto found_first; | ||
| 196 | if (~tmp) | ||
| 197 | goto found_middle; | ||
| 198 | size -= 32; | ||
| 199 | result += 32; | ||
| 200 | } | ||
| 201 | while (size & ~31UL) { | ||
| 202 | if (~(tmp = *(p++))) | ||
| 203 | goto found_middle; | ||
| 204 | result += 32; | ||
| 205 | size -= 32; | ||
| 206 | } | ||
| 207 | if (!size) | ||
| 208 | return result; | ||
| 209 | tmp = *p; | ||
| 210 | |||
| 211 | found_first: | ||
| 212 | tmp |= ~0UL << size; | ||
| 213 | found_middle: | ||
| 214 | return result + ffz(tmp); | ||
| 215 | } | ||
| 216 | |||
| 217 | #define ffs(x) generic_ffs(x) | ||
| 218 | #define __ffs(x) (ffs(x) - 1) | ||
| 219 | 164 | ||
| 220 | /* | 165 | /* |
| 221 | * fls: find last bit set. | 166 | * fls: find last bit set. |
| @@ -228,114 +173,17 @@ found_middle: | |||
| 228 | \ | 173 | \ |
| 229 | bit ? 33 - bit : bit; \ | 174 | bit ? 33 - bit : bit; \ |
| 230 | }) | 175 | }) |
| 231 | #define fls64(x) generic_fls64(x) | ||
| 232 | 176 | ||
| 233 | /* | 177 | #include <asm-generic/bitops/fls64.h> |
| 234 | * Every architecture must define this function. It's the fastest | 178 | #include <asm-generic/bitops/sched.h> |
| 235 | * way of searching a 140-bit bitmap where the first 100 bits are | 179 | #include <asm-generic/bitops/hweight.h> |
| 236 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 237 | * bits is cleared. | ||
| 238 | */ | ||
| 239 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 240 | { | ||
| 241 | if (unlikely(b[0])) | ||
| 242 | return __ffs(b[0]); | ||
| 243 | if (unlikely(b[1])) | ||
| 244 | return __ffs(b[1]) + 32; | ||
| 245 | if (unlikely(b[2])) | ||
| 246 | return __ffs(b[2]) + 64; | ||
| 247 | if (b[3]) | ||
| 248 | return __ffs(b[3]) + 96; | ||
| 249 | return __ffs(b[4]) + 128; | ||
| 250 | } | ||
| 251 | 180 | ||
| 181 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
| 252 | 182 | ||
| 253 | /* | 183 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) |
| 254 | * hweightN: returns the hamming weight (i.e. the number | 184 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) |
| 255 | * of bits set) of a N-bit word | ||
| 256 | */ | ||
| 257 | |||
| 258 | #define hweight32(x) generic_hweight32(x) | ||
| 259 | #define hweight16(x) generic_hweight16(x) | ||
| 260 | #define hweight8(x) generic_hweight8(x) | ||
| 261 | |||
| 262 | #define ext2_set_bit(nr, addr) test_and_set_bit ((nr) ^ 0x18, (addr)) | ||
| 263 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (addr)) | ||
| 264 | |||
| 265 | #define ext2_set_bit_atomic(lock,nr,addr) ext2_set_bit((nr), addr) | ||
| 266 | #define ext2_clear_bit_atomic(lock,nr,addr) ext2_clear_bit((nr), addr) | ||
| 267 | |||
| 268 | static inline int ext2_test_bit(int nr, const volatile void * addr) | ||
| 269 | { | ||
| 270 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
| 271 | int mask; | ||
| 272 | |||
| 273 | ADDR += nr >> 3; | ||
| 274 | mask = 1 << (nr & 0x07); | ||
| 275 | return ((mask & *ADDR) != 0); | ||
| 276 | } | ||
| 277 | |||
| 278 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 279 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 280 | |||
| 281 | static inline unsigned long ext2_find_next_zero_bit(const void *addr, | ||
| 282 | unsigned long size, | ||
| 283 | unsigned long offset) | ||
| 284 | { | ||
| 285 | const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); | ||
| 286 | unsigned long result = offset & ~31UL; | ||
| 287 | unsigned long tmp; | ||
| 288 | |||
| 289 | if (offset >= size) | ||
| 290 | return size; | ||
| 291 | size -= result; | ||
| 292 | offset &= 31UL; | ||
| 293 | if(offset) { | ||
| 294 | /* We hold the little endian value in tmp, but then the | ||
| 295 | * shift is illegal. So we could keep a big endian value | ||
| 296 | * in tmp, like this: | ||
| 297 | * | ||
| 298 | * tmp = __swab32(*(p++)); | ||
| 299 | * tmp |= ~0UL >> (32-offset); | ||
| 300 | * | ||
| 301 | * but this would decrease performance, so we change the | ||
| 302 | * shift: | ||
| 303 | */ | ||
| 304 | tmp = *(p++); | ||
| 305 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 306 | if(size < 32) | ||
| 307 | goto found_first; | ||
| 308 | if(~tmp) | ||
| 309 | goto found_middle; | ||
| 310 | size -= 32; | ||
| 311 | result += 32; | ||
| 312 | } | ||
| 313 | while(size & ~31UL) { | ||
| 314 | if(~(tmp = *(p++))) | ||
| 315 | goto found_middle; | ||
| 316 | result += 32; | ||
| 317 | size -= 32; | ||
| 318 | } | ||
| 319 | if(!size) | ||
| 320 | return result; | ||
| 321 | tmp = *p; | ||
| 322 | |||
| 323 | found_first: | ||
| 324 | /* tmp is little endian, so we would have to swab the shift, | ||
| 325 | * see above. But then we have to swab tmp below for ffz, so | ||
| 326 | * we might as well do this here. | ||
| 327 | */ | ||
| 328 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 329 | found_middle: | ||
| 330 | return result + ffz(__swab32(tmp)); | ||
| 331 | } | ||
| 332 | 185 | ||
| 333 | /* Bitmap functions for the minix filesystem. */ | 186 | #include <asm-generic/bitops/minix-le.h> |
| 334 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
| 335 | #define minix_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
| 336 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
| 337 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
| 338 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
| 339 | 187 | ||
| 340 | #endif /* __KERNEL__ */ | 188 | #endif /* __KERNEL__ */ |
| 341 | 189 | ||
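The (nr) ^ 0x18 swizzle used by ext2_set_bit_atomic()/ext2_clear_bit_atomic() above maps an ext2 little-endian bit number onto this port's big-endian 32-bit word layout: the byte index inside the word is reversed while the bit position inside each byte is kept. A minimal sketch of the mapping, assuming 32-bit longs (the helper name is hypothetical):

    /* Which native bit an ext2 bit number lands on when the swizzle
     * constant is 0x18 (24), i.e. BITS_PER_LONG == 32.
     */
    static inline int ext2_to_native_bit(int nr)
    {
            return nr ^ 0x18;
    }

    /* ext2 bit 0 -> native bit 24, ext2 bit 7 -> native bit 31,
     * ext2 bit 8 -> native bit 16, ext2 bit 31 -> native bit 7.
     */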
diff --git a/include/asm-frv/futex.h b/include/asm-frv/futex.h index fca9d90e32c9..08b3d1da3583 100644 --- a/include/asm-frv/futex.h +++ b/include/asm-frv/futex.h | |||
| @@ -9,5 +9,11 @@ | |||
| 9 | 9 | ||
| 10 | extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); | 10 | extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); |
| 11 | 11 | ||
| 12 | static inline int | ||
| 13 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 14 | { | ||
| 15 | return -ENOSYS; | ||
| 16 | } | ||
| 17 | |||
| 12 | #endif | 18 | #endif |
| 13 | #endif | 19 | #endif |
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h index b8221b611b5c..dc0f7e08a4c2 100644 --- a/include/asm-frv/page.h +++ b/include/asm-frv/page.h | |||
| @@ -57,13 +57,9 @@ extern unsigned long min_low_pfn; | |||
| 57 | extern unsigned long max_pfn; | 57 | extern unsigned long max_pfn; |
| 58 | 58 | ||
| 59 | #ifdef CONFIG_MMU | 59 | #ifdef CONFIG_MMU |
| 60 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 61 | #define page_to_pfn(page) ((unsigned long) ((page) - mem_map)) | ||
| 62 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 60 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 63 | |||
| 64 | #else | 61 | #else |
| 65 | #define pfn_to_page(pfn) (&mem_map[(pfn) - (PAGE_OFFSET >> PAGE_SHIFT)]) | 62 | #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) |
| 66 | #define page_to_pfn(page) ((PAGE_OFFSET >> PAGE_SHIFT) + (unsigned long) ((page) - mem_map)) | ||
| 67 | #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn) | 63 | #define pfn_valid(pfn) ((pfn) >= min_low_pfn && (pfn) < max_low_pfn) |
| 68 | 64 | ||
| 69 | #endif | 65 | #endif |
| @@ -87,6 +83,7 @@ extern unsigned long max_pfn; | |||
| 87 | #define WANT_PAGE_VIRTUAL 1 | 83 | #define WANT_PAGE_VIRTUAL 1 |
| 88 | #endif | 84 | #endif |
| 89 | 85 | ||
| 86 | #include <asm-generic/memory_model.h> | ||
| 90 | #include <asm-generic/page.h> | 87 | #include <asm-generic/page.h> |
| 91 | 88 | ||
| 92 | #endif /* _ASM_PAGE_H */ | 89 | #endif /* _ASM_PAGE_H */ |
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index 0e6d9852008c..1f9d99193df8 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h | |||
| @@ -5,77 +5,27 @@ | |||
| 5 | * For the benefit of those who are trying to port Linux to another | 5 | * For the benefit of those who are trying to port Linux to another |
| 6 | * architecture, here are some C-language equivalents. You should | 6 | * architecture, here are some C-language equivalents. You should |
| 7 | * recode these in the native assembly language, if at all possible. | 7 | * recode these in the native assembly language, if at all possible. |
| 8 | * To guarantee atomicity, these routines call cli() and sti() to | ||
| 9 | * disable interrupts while they operate. (You have to provide inline | ||
| 10 | * routines to cli() and sti().) | ||
| 11 | * | ||
| 12 | * Also note, these routines assume that you have 32 bit longs. | ||
| 13 | * You will have to change this if you are trying to port Linux to the | ||
| 14 | * Alpha architecture or to a Cray. :-) | ||
| 15 | * | 8 | * |
| 16 | * C language equivalents written by Theodore Ts'o, 9/26/92 | 9 | * C language equivalents written by Theodore Ts'o, 9/26/92 |
| 17 | */ | 10 | */ |
| 18 | 11 | ||
| 19 | extern __inline__ int set_bit(int nr,long * addr) | 12 | #include <asm-generic/bitops/atomic.h> |
| 20 | { | 13 | #include <asm-generic/bitops/non-atomic.h> |
| 21 | int mask, retval; | 14 | #include <asm-generic/bitops/__ffs.h> |
| 22 | 15 | #include <asm-generic/bitops/ffz.h> | |
| 23 | addr += nr >> 5; | 16 | #include <asm-generic/bitops/fls.h> |
| 24 | mask = 1 << (nr & 0x1f); | 17 | #include <asm-generic/bitops/fls64.h> |
| 25 | cli(); | 18 | #include <asm-generic/bitops/find.h> |
| 26 | retval = (mask & *addr) != 0; | ||
| 27 | *addr |= mask; | ||
| 28 | sti(); | ||
| 29 | return retval; | ||
| 30 | } | ||
| 31 | |||
| 32 | extern __inline__ int clear_bit(int nr, long * addr) | ||
| 33 | { | ||
| 34 | int mask, retval; | ||
| 35 | |||
| 36 | addr += nr >> 5; | ||
| 37 | mask = 1 << (nr & 0x1f); | ||
| 38 | cli(); | ||
| 39 | retval = (mask & *addr) != 0; | ||
| 40 | *addr &= ~mask; | ||
| 41 | sti(); | ||
| 42 | return retval; | ||
| 43 | } | ||
| 44 | |||
| 45 | extern __inline__ int test_bit(int nr, const unsigned long * addr) | ||
| 46 | { | ||
| 47 | int mask; | ||
| 48 | |||
| 49 | addr += nr >> 5; | ||
| 50 | mask = 1 << (nr & 0x1f); | ||
| 51 | return ((mask & *addr) != 0); | ||
| 52 | } | ||
| 53 | |||
| 54 | /* | ||
| 55 | * fls: find last bit set. | ||
| 56 | */ | ||
| 57 | |||
| 58 | #define fls(x) generic_fls(x) | ||
| 59 | #define fls64(x) generic_fls64(x) | ||
| 60 | 19 | ||
| 61 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
| 62 | 21 | ||
| 63 | /* | 22 | #include <asm-generic/bitops/sched.h> |
| 64 | * ffs: find first bit set. This is defined the same way as | 23 | #include <asm-generic/bitops/ffs.h> |
| 65 | * the libc and compiler builtin ffs routines, therefore | 24 | #include <asm-generic/bitops/hweight.h> |
| 66 | * differs in spirit from the above ffz (man ffs). | ||
| 67 | */ | ||
| 68 | |||
| 69 | #define ffs(x) generic_ffs(x) | ||
| 70 | |||
| 71 | /* | ||
| 72 | * hweightN: returns the hamming weight (i.e. the number | ||
| 73 | * of bits set) of a N-bit word | ||
| 74 | */ | ||
| 75 | 25 | ||
| 76 | #define hweight32(x) generic_hweight32(x) | 26 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 77 | #define hweight16(x) generic_hweight16(x) | 27 | #include <asm-generic/bitops/ext2-atomic.h> |
| 78 | #define hweight8(x) generic_hweight8(x) | 28 | #include <asm-generic/bitops/minix.h> |
| 79 | 29 | ||
| 80 | #endif /* __KERNEL__ */ | 30 | #endif /* __KERNEL__ */ |
| 81 | 31 | ||
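The rewritten asm-generic/bitops.h keeps the historical calling conventions while sourcing every helper from the per-topic headers that follow, so existing callers compile unchanged. A minimal usage sketch, assuming the usual <linux/bitops.h> wrapper and DECLARE_BITMAP() (function and variable names are hypothetical):

    static DECLARE_BITMAP(mask, 64);          /* 64-bit bitmap, initially clear */

    static int bitops_demo(void)
    {
            set_bit(3, mask);                 /* atomic set                */
            __set_bit(5, mask);               /* non-atomic variant        */
            if (test_and_clear_bit(3, mask))  /* returns the old bit value */
                    return find_first_bit(mask, 64);   /* -> 5            */
            return 64;                        /* nothing set               */
    }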
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 000000000000..9a3274aecf83 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h | |||
| @@ -0,0 +1,43 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS___FFS_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS___FFS_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | |||
| 6 | /** | ||
| 7 | * __ffs - find first bit in word. | ||
| 8 | * @word: The word to search | ||
| 9 | * | ||
| 10 | * Undefined if no bit exists, so code should check against 0 first. | ||
| 11 | */ | ||
| 12 | static inline unsigned long __ffs(unsigned long word) | ||
| 13 | { | ||
| 14 | int num = 0; | ||
| 15 | |||
| 16 | #if BITS_PER_LONG == 64 | ||
| 17 | if ((word & 0xffffffff) == 0) { | ||
| 18 | num += 32; | ||
| 19 | word >>= 32; | ||
| 20 | } | ||
| 21 | #endif | ||
| 22 | if ((word & 0xffff) == 0) { | ||
| 23 | num += 16; | ||
| 24 | word >>= 16; | ||
| 25 | } | ||
| 26 | if ((word & 0xff) == 0) { | ||
| 27 | num += 8; | ||
| 28 | word >>= 8; | ||
| 29 | } | ||
| 30 | if ((word & 0xf) == 0) { | ||
| 31 | num += 4; | ||
| 32 | word >>= 4; | ||
| 33 | } | ||
| 34 | if ((word & 0x3) == 0) { | ||
| 35 | num += 2; | ||
| 36 | word >>= 2; | ||
| 37 | } | ||
| 38 | if ((word & 0x1) == 0) | ||
| 39 | num += 1; | ||
| 40 | return num; | ||
| 41 | } | ||
| 42 | |||
| 43 | #endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ | ||
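The generic __ffs() narrows the search by halving the examined width at each step (32 -> 16 -> 8 -> 4 -> 2 -> 1). A worked trace for one input, assuming 32-bit longs:

    /* __ffs(0x00400000):
     *   word & 0xffff == 0  -> num = 16, word >>= 16   (word = 0x0040)
     *   word & 0xff   != 0  -> unchanged
     *   word & 0xf    == 0  -> num = 20, word >>= 4    (word = 0x4)
     *   word & 0x3    == 0  -> num = 22, word >>= 2    (word = 0x1)
     *   word & 0x1    != 0  -> done
     * result: 22, the index of the least significant set bit.
     */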
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 000000000000..78339319ba02 --- /dev/null +++ b/include/asm-generic/bitops/atomic.h | |||
| @@ -0,0 +1,191 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_ATOMIC_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | |||
| 6 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
| 7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
| 8 | |||
| 9 | #ifdef CONFIG_SMP | ||
| 10 | #include <asm/spinlock.h> | ||
| 11 | #include <asm/cache.h> /* we use L1_CACHE_BYTES */ | ||
| 12 | |||
| 13 | /* Use an array of spinlocks for our atomic_ts. | ||
| 14 | * Hash function to index into a different SPINLOCK. | ||
| 15 | * Since "a" is usually an address, use one spinlock per cacheline. | ||
| 16 | */ | ||
| 17 | # define ATOMIC_HASH_SIZE 4 | ||
| 18 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | ||
| 19 | |||
| 20 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | ||
| 21 | |||
| 22 | /* Can't use raw_spin_lock_irq because of #include problems, so | ||
| 23 | * this is the substitute */ | ||
| 24 | #define _atomic_spin_lock_irqsave(l,f) do { \ | ||
| 25 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
| 26 | local_irq_save(f); \ | ||
| 27 | __raw_spin_lock(s); \ | ||
| 28 | } while(0) | ||
| 29 | |||
| 30 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | ||
| 31 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
| 32 | __raw_spin_unlock(s); \ | ||
| 33 | local_irq_restore(f); \ | ||
| 34 | } while(0) | ||
| 35 | |||
| 36 | |||
| 37 | #else | ||
| 38 | # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) | ||
| 39 | # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) | ||
| 40 | #endif | ||
| 41 | |||
| 42 | /* | ||
| 43 | * NMI events can occur at any time, including when interrupts have been | ||
| 44 | * disabled by *_irqsave(). So you can get NMI events occurring while a | ||
| 45 | * *_bit function is holding a spin lock. If the NMI handler also wants | ||
| 46 | * to do bit manipulation (and they do) then you can get a deadlock | ||
| 47 | * between the original caller of *_bit() and the NMI handler. | ||
| 48 | * | ||
| 49 | * by Keith Owens | ||
| 50 | */ | ||
| 51 | |||
| 52 | /** | ||
| 53 | * set_bit - Atomically set a bit in memory | ||
| 54 | * @nr: the bit to set | ||
| 55 | * @addr: the address to start counting from | ||
| 56 | * | ||
| 57 | * This function is atomic and may not be reordered. See __set_bit() | ||
| 58 | * if you do not require the atomic guarantees. | ||
| 59 | * | ||
| 60 | * Note: there are no guarantees that this function will not be reordered | ||
| 61 | * on non-x86 architectures, so if you are writing portable code, | ||
| 62 | * make sure not to rely on its reordering guarantees. | ||
| 63 | * | ||
| 64 | * Note that @nr may be almost arbitrarily large; this function is not | ||
| 65 | * restricted to acting on a single-word quantity. | ||
| 66 | */ | ||
| 67 | static inline void set_bit(int nr, volatile unsigned long *addr) | ||
| 68 | { | ||
| 69 | unsigned long mask = BITOP_MASK(nr); | ||
| 70 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 71 | unsigned long flags; | ||
| 72 | |||
| 73 | _atomic_spin_lock_irqsave(p, flags); | ||
| 74 | *p |= mask; | ||
| 75 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 76 | } | ||
| 77 | |||
| 78 | /** | ||
| 79 | * clear_bit - Clears a bit in memory | ||
| 80 | * @nr: Bit to clear | ||
| 81 | * @addr: Address to start counting from | ||
| 82 | * | ||
| 83 | * clear_bit() is atomic and may not be reordered. However, it does | ||
| 84 | * not contain a memory barrier, so if it is used for locking purposes, | ||
| 85 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
| 86 | * in order to ensure changes are visible on other processors. | ||
| 87 | */ | ||
| 88 | static inline void clear_bit(int nr, volatile unsigned long *addr) | ||
| 89 | { | ||
| 90 | unsigned long mask = BITOP_MASK(nr); | ||
| 91 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 92 | unsigned long flags; | ||
| 93 | |||
| 94 | _atomic_spin_lock_irqsave(p, flags); | ||
| 95 | *p &= ~mask; | ||
| 96 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 97 | } | ||
| 98 | |||
| 99 | /** | ||
| 100 | * change_bit - Toggle a bit in memory | ||
| 101 | * @nr: Bit to change | ||
| 102 | * @addr: Address to start counting from | ||
| 103 | * | ||
| 104 | * change_bit() is atomic and may not be reordered. It may be | ||
| 105 | * reordered on architectures other than x86. | ||
| 106 | * Note that @nr may be almost arbitrarily large; this function is not | ||
| 107 | * restricted to acting on a single-word quantity. | ||
| 108 | */ | ||
| 109 | static inline void change_bit(int nr, volatile unsigned long *addr) | ||
| 110 | { | ||
| 111 | unsigned long mask = BITOP_MASK(nr); | ||
| 112 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 113 | unsigned long flags; | ||
| 114 | |||
| 115 | _atomic_spin_lock_irqsave(p, flags); | ||
| 116 | *p ^= mask; | ||
| 117 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 118 | } | ||
| 119 | |||
| 120 | /** | ||
| 121 | * test_and_set_bit - Set a bit and return its old value | ||
| 122 | * @nr: Bit to set | ||
| 123 | * @addr: Address to count from | ||
| 124 | * | ||
| 125 | * This operation is atomic and cannot be reordered. | ||
| 126 | * It may be reordered on architectures other than x86. | ||
| 127 | * It also implies a memory barrier. | ||
| 128 | */ | ||
| 129 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | ||
| 130 | { | ||
| 131 | unsigned long mask = BITOP_MASK(nr); | ||
| 132 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 133 | unsigned long old; | ||
| 134 | unsigned long flags; | ||
| 135 | |||
| 136 | _atomic_spin_lock_irqsave(p, flags); | ||
| 137 | old = *p; | ||
| 138 | *p = old | mask; | ||
| 139 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 140 | |||
| 141 | return (old & mask) != 0; | ||
| 142 | } | ||
| 143 | |||
| 144 | /** | ||
| 145 | * test_and_clear_bit - Clear a bit and return its old value | ||
| 146 | * @nr: Bit to clear | ||
| 147 | * @addr: Address to count from | ||
| 148 | * | ||
| 149 | * This operation is atomic and cannot be reordered. | ||
| 150 | * It can be reordered on architectures other than x86. | ||
| 151 | * It also implies a memory barrier. | ||
| 152 | */ | ||
| 153 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
| 154 | { | ||
| 155 | unsigned long mask = BITOP_MASK(nr); | ||
| 156 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 157 | unsigned long old; | ||
| 158 | unsigned long flags; | ||
| 159 | |||
| 160 | _atomic_spin_lock_irqsave(p, flags); | ||
| 161 | old = *p; | ||
| 162 | *p = old & ~mask; | ||
| 163 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 164 | |||
| 165 | return (old & mask) != 0; | ||
| 166 | } | ||
| 167 | |||
| 168 | /** | ||
| 169 | * test_and_change_bit - Change a bit and return its old value | ||
| 170 | * @nr: Bit to change | ||
| 171 | * @addr: Address to count from | ||
| 172 | * | ||
| 173 | * This operation is atomic and cannot be reordered. | ||
| 174 | * It also implies a memory barrier. | ||
| 175 | */ | ||
| 176 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | ||
| 177 | { | ||
| 178 | unsigned long mask = BITOP_MASK(nr); | ||
| 179 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 180 | unsigned long old; | ||
| 181 | unsigned long flags; | ||
| 182 | |||
| 183 | _atomic_spin_lock_irqsave(p, flags); | ||
| 184 | old = *p; | ||
| 185 | *p = old ^ mask; | ||
| 186 | _atomic_spin_unlock_irqrestore(p, flags); | ||
| 187 | |||
| 188 | return (old & mask) != 0; | ||
| 189 | } | ||
| 190 | |||
| 191 | #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */ | ||
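On SMP these fallbacks serialize through a small hash of raw spinlocks keyed by the cacheline of the target word, so bits in the same word always take the same lock while unrelated bitmaps usually do not contend. A sketch of the hashing, assuming L1_CACHE_BYTES == 32 and ATOMIC_HASH_SIZE == 4 (both are configuration-dependent):

    unsigned long a = 0x1000;   /* word 0 of some bitmap            */
    unsigned long b = 0x1004;   /* next word, same cacheline        */
    unsigned long c = 0x1020;   /* first word of the next cacheline */

    /* ATOMIC_HASH(a) == &__atomic_hash[(0x1000 / 32) & 3] == &__atomic_hash[0]
     * ATOMIC_HASH(b) == &__atomic_hash[0]                  -- same lock as a
     * ATOMIC_HASH(c) == &__atomic_hash[(0x1020 / 32) & 3] == &__atomic_hash[1]
     */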
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 000000000000..ab1c875efb74 --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h | |||
| @@ -0,0 +1,22 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ | ||
| 3 | |||
| 4 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 5 | ({ \ | ||
| 6 | int ret; \ | ||
| 7 | spin_lock(lock); \ | ||
| 8 | ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ | ||
| 9 | spin_unlock(lock); \ | ||
| 10 | ret; \ | ||
| 11 | }) | ||
| 12 | |||
| 13 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 14 | ({ \ | ||
| 15 | int ret; \ | ||
| 16 | spin_lock(lock); \ | ||
| 17 | ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ | ||
| 18 | spin_unlock(lock); \ | ||
| 19 | ret; \ | ||
| 20 | }) | ||
| 21 | |||
| 22 | #endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ | ||
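These macros expect the spinlock that already guards the filesystem bitmap. A minimal caller sketch, assuming a hypothetical lock and bitmap:

    static DEFINE_SPINLOCK(bitmap_lock);      /* hypothetical lock         */
    static unsigned long bitmap[8];           /* 256-bit ext2-style bitmap */

    static int claim_slot(int nr)
    {
            /* non-zero return means the slot was already taken */
            return ext2_set_bit_atomic(&bitmap_lock, nr, bitmap);
    }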
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h new file mode 100644 index 000000000000..1697404afa05 --- /dev/null +++ b/include/asm-generic/bitops/ext2-non-atomic.h | |||
| @@ -0,0 +1,18 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ | ||
| 3 | |||
| 4 | #include <asm-generic/bitops/le.h> | ||
| 5 | |||
| 6 | #define ext2_set_bit(nr,addr) \ | ||
| 7 | generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
| 8 | #define ext2_clear_bit(nr,addr) \ | ||
| 9 | generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
| 10 | |||
| 11 | #define ext2_test_bit(nr,addr) \ | ||
| 12 | generic_test_le_bit((nr),(unsigned long *)(addr)) | ||
| 13 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 14 | generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
| 15 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 16 | generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
| 17 | |||
| 18 | #endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ | ||
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 000000000000..fbbb43af7dc0 --- /dev/null +++ b/include/asm-generic/bitops/ffs.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_FFS_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_FFS_H_ | ||
| 3 | |||
| 4 | /** | ||
| 5 | * ffs - find first bit set | ||
| 6 | * @x: the word to search | ||
| 7 | * | ||
| 8 | * This is defined the same way as | ||
| 9 | * the libc and compiler builtin ffs routines, therefore | ||
| 10 | * differs in spirit from the above ffz (man ffs). | ||
| 11 | */ | ||
| 12 | static inline int ffs(int x) | ||
| 13 | { | ||
| 14 | int r = 1; | ||
| 15 | |||
| 16 | if (!x) | ||
| 17 | return 0; | ||
| 18 | if (!(x & 0xffff)) { | ||
| 19 | x >>= 16; | ||
| 20 | r += 16; | ||
| 21 | } | ||
| 22 | if (!(x & 0xff)) { | ||
| 23 | x >>= 8; | ||
| 24 | r += 8; | ||
| 25 | } | ||
| 26 | if (!(x & 0xf)) { | ||
| 27 | x >>= 4; | ||
| 28 | r += 4; | ||
| 29 | } | ||
| 30 | if (!(x & 3)) { | ||
| 31 | x >>= 2; | ||
| 32 | r += 2; | ||
| 33 | } | ||
| 34 | if (!(x & 1)) { | ||
| 35 | x >>= 1; | ||
| 36 | r += 1; | ||
| 37 | } | ||
| 38 | return r; | ||
| 39 | } | ||
| 40 | |||
| 41 | #endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ | ||
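A few spot values to pin down the 1-based convention relative to __ffs():

    /* ffs(0)          == 0    (no bit set)
     * ffs(1)          == 1    (__ffs(1)          == 0)
     * ffs(0x8000)     == 16   (__ffs(0x8000)     == 15)
     * ffs(0x80000000) == 32   (__ffs(0x80000000) == 31)
     */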
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 000000000000..6744bd4cdf46 --- /dev/null +++ b/include/asm-generic/bitops/ffz.h | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_FFZ_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_FFZ_H_ | ||
| 3 | |||
| 4 | /* | ||
| 5 | * ffz - find first zero in word. | ||
| 6 | * @word: The word to search | ||
| 7 | * | ||
| 8 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
| 9 | */ | ||
| 10 | #define ffz(x) __ffs(~(x)) | ||
| 11 | |||
| 12 | #endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ | ||
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 000000000000..72a51e5a12ef --- /dev/null +++ b/include/asm-generic/bitops/find.h | |||
| @@ -0,0 +1,13 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_FIND_H_ | ||
| 3 | |||
| 4 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | ||
| 5 | size, unsigned long offset); | ||
| 6 | |||
| 7 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned | ||
| 8 | long size, unsigned long offset); | ||
| 9 | |||
| 10 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
| 11 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
| 12 | |||
| 13 | #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */ | ||
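A common caller pattern for these out-of-line helpers is a simple scan loop over a fixed-size bitmap; a minimal sketch (the function name is hypothetical):

    static void visit_set_bits(const unsigned long *map, unsigned long nbits)
    {
            unsigned long bit;

            for (bit = find_first_bit(map, nbits);
                 bit < nbits;
                 bit = find_next_bit(map, nbits, bit + 1)) {
                    /* act on 'bit' here */
            }
    }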
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 000000000000..850859bc5069 --- /dev/null +++ b/include/asm-generic/bitops/fls.h | |||
| @@ -0,0 +1,41 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_FLS_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_FLS_H_ | ||
| 3 | |||
| 4 | /** | ||
| 5 | * fls - find last (most-significant) bit set | ||
| 6 | * @x: the word to search | ||
| 7 | * | ||
| 8 | * This is defined the same way as ffs. | ||
| 9 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
| 10 | */ | ||
| 11 | |||
| 12 | static inline int fls(int x) | ||
| 13 | { | ||
| 14 | int r = 32; | ||
| 15 | |||
| 16 | if (!x) | ||
| 17 | return 0; | ||
| 18 | if (!(x & 0xffff0000u)) { | ||
| 19 | x <<= 16; | ||
| 20 | r -= 16; | ||
| 21 | } | ||
| 22 | if (!(x & 0xff000000u)) { | ||
| 23 | x <<= 8; | ||
| 24 | r -= 8; | ||
| 25 | } | ||
| 26 | if (!(x & 0xf0000000u)) { | ||
| 27 | x <<= 4; | ||
| 28 | r -= 4; | ||
| 29 | } | ||
| 30 | if (!(x & 0xc0000000u)) { | ||
| 31 | x <<= 2; | ||
| 32 | r -= 2; | ||
| 33 | } | ||
| 34 | if (!(x & 0x80000000u)) { | ||
| 35 | x <<= 1; | ||
| 36 | r -= 1; | ||
| 37 | } | ||
| 38 | return r; | ||
| 39 | } | ||
| 40 | |||
| 41 | #endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ | ||
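Spot values for the generic fls(), which mirrors the 1-based ffs() convention from the most significant end:

    /* fls(0)          == 0
     * fls(1)          == 1
     * fls(0x8000)     == 16
     * fls(0x80000000) == 32
     * so for x > 0, fls(x) == floor(log2(x)) + 1.
     */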
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 000000000000..1b6b17ce2428 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_FLS64_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_FLS64_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | |||
| 6 | static inline int fls64(__u64 x) | ||
| 7 | { | ||
| 8 | __u32 h = x >> 32; | ||
| 9 | if (h) | ||
| 10 | return fls(h) + 32; | ||
| 11 | return fls(x); | ||
| 12 | } | ||
| 13 | |||
| 14 | #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ | ||
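The 64-bit wrapper simply splits the word at bit 32, for example:

    /* fls64(0)              == 0
     * fls64(1)              == 1
     * fls64(0x100000000ULL) == 33   (high half is 1, so fls(1) + 32)
     * fls64(~0ULL)          == 64
     */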
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 000000000000..fbbc383771da --- /dev/null +++ b/include/asm-generic/bitops/hweight.h | |||
| @@ -0,0 +1,11 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | |||
| 6 | extern unsigned int hweight32(unsigned int w); | ||
| 7 | extern unsigned int hweight16(unsigned int w); | ||
| 8 | extern unsigned int hweight8(unsigned int w); | ||
| 9 | extern unsigned long hweight64(__u64 w); | ||
| 10 | |||
| 11 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | ||
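These are plain extern declarations; the shared implementation is supplied by generic library code rather than expanded inline per architecture. The results match the old generic_hweight*() macros, for example:

    /* hweight8(0x01)        == 1
     * hweight16(0x00ff)     == 8
     * hweight32(0xf0f0f0f0) == 16
     * hweight64(~0ULL)      == 64
     */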
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 000000000000..b9c7e5d2d2ad --- /dev/null +++ b/include/asm-generic/bitops/le.h | |||
| @@ -0,0 +1,53 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_LE_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_LE_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | #include <asm/byteorder.h> | ||
| 6 | |||
| 7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
| 8 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | ||
| 9 | |||
| 10 | #if defined(__LITTLE_ENDIAN) | ||
| 11 | |||
| 12 | #define generic_test_le_bit(nr, addr) test_bit(nr, addr) | ||
| 13 | #define generic___set_le_bit(nr, addr) __set_bit(nr, addr) | ||
| 14 | #define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) | ||
| 15 | |||
| 16 | #define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) | ||
| 17 | #define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) | ||
| 18 | |||
| 19 | #define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) | ||
| 20 | #define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) | ||
| 21 | |||
| 22 | #define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) | ||
| 23 | |||
| 24 | #elif defined(__BIG_ENDIAN) | ||
| 25 | |||
| 26 | #define generic_test_le_bit(nr, addr) \ | ||
| 27 | test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 28 | #define generic___set_le_bit(nr, addr) \ | ||
| 29 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 30 | #define generic___clear_le_bit(nr, addr) \ | ||
| 31 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 32 | |||
| 33 | #define generic_test_and_set_le_bit(nr, addr) \ | ||
| 34 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 35 | #define generic_test_and_clear_le_bit(nr, addr) \ | ||
| 36 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 37 | |||
| 38 | #define generic___test_and_set_le_bit(nr, addr) \ | ||
| 39 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 40 | #define generic___test_and_clear_le_bit(nr, addr) \ | ||
| 41 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
| 42 | |||
| 43 | extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, | ||
| 44 | unsigned long size, unsigned long offset); | ||
| 45 | |||
| 46 | #else | ||
| 47 | #error "Please fix <asm/byteorder.h>" | ||
| 48 | #endif | ||
| 49 | |||
| 50 | #define generic_find_first_zero_le_bit(addr, size) \ | ||
| 51 | generic_find_next_zero_le_bit((addr), (size), 0) | ||
| 52 | |||
| 53 | #endif /* _ASM_GENERIC_BITOPS_LE_H_ */ | ||
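BITOP_LE_SWIZZLE is (BITS_PER_LONG-1) & ~0x7, i.e. 24 on 32-bit and 56 on 64-bit targets, and XOR-ing it into a bit number reverses the byte order inside one native word while preserving the bit order inside each byte. A sketch of the 64-bit big-endian case:

    /* BITS_PER_LONG == 64: BITOP_LE_SWIZZLE == 63 & ~7 == 56
     *   little-endian bit  0 -> native bit 56
     *   little-endian bit  7 -> native bit 63
     *   little-endian bit  8 -> native bit 48
     *   little-endian bit 63 -> native bit  7
     * On little-endian machines the macros collapse to the plain bitops.
     */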
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h new file mode 100644 index 000000000000..4a981c1bb1ae --- /dev/null +++ b/include/asm-generic/bitops/minix-le.h | |||
| @@ -0,0 +1,17 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_MINIX_LE_H_ | ||
| 3 | |||
| 4 | #include <asm-generic/bitops/le.h> | ||
| 5 | |||
| 6 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 7 | generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
| 8 | #define minix_set_bit(nr,addr) \ | ||
| 9 | generic___set_le_bit((nr),(unsigned long *)(addr)) | ||
| 10 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 11 | generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
| 12 | #define minix_test_bit(nr,addr) \ | ||
| 13 | generic_test_le_bit((nr),(unsigned long *)(addr)) | ||
| 14 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 15 | generic_find_first_zero_le_bit((unsigned long *)(addr),(size)) | ||
| 16 | |||
| 17 | #endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */ | ||
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h new file mode 100644 index 000000000000..91f42e87aa51 --- /dev/null +++ b/include/asm-generic/bitops/minix.h | |||
| @@ -0,0 +1,15 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_MINIX_H_ | ||
| 3 | |||
| 4 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 5 | __test_and_set_bit((nr),(unsigned long *)(addr)) | ||
| 6 | #define minix_set_bit(nr,addr) \ | ||
| 7 | __set_bit((nr),(unsigned long *)(addr)) | ||
| 8 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 9 | __test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
| 10 | #define minix_test_bit(nr,addr) \ | ||
| 11 | test_bit((nr),(unsigned long *)(addr)) | ||
| 12 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 13 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
| 14 | |||
| 15 | #endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ | ||
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h new file mode 100644 index 000000000000..46a825cf2ae1 --- /dev/null +++ b/include/asm-generic/bitops/non-atomic.h | |||
| @@ -0,0 +1,111 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ | ||
| 3 | |||
| 4 | #include <asm/types.h> | ||
| 5 | |||
| 6 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
| 7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
| 8 | |||
| 9 | /** | ||
| 10 | * __set_bit - Set a bit in memory | ||
| 11 | * @nr: the bit to set | ||
| 12 | * @addr: the address to start counting from | ||
| 13 | * | ||
| 14 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
| 15 | * If it's called on the same region of memory simultaneously, the effect | ||
| 16 | * may be that only one operation succeeds. | ||
| 17 | */ | ||
| 18 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
| 19 | { | ||
| 20 | unsigned long mask = BITOP_MASK(nr); | ||
| 21 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 22 | |||
| 23 | *p |= mask; | ||
| 24 | } | ||
| 25 | |||
| 26 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
| 27 | { | ||
| 28 | unsigned long mask = BITOP_MASK(nr); | ||
| 29 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 30 | |||
| 31 | *p &= ~mask; | ||
| 32 | } | ||
| 33 | |||
| 34 | /** | ||
| 35 | * __change_bit - Toggle a bit in memory | ||
| 36 | * @nr: the bit to change | ||
| 37 | * @addr: the address to start counting from | ||
| 38 | * | ||
| 39 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
| 40 | * If it's called on the same region of memory simultaneously, the effect | ||
| 41 | * may be that only one operation succeeds. | ||
| 42 | */ | ||
| 43 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
| 44 | { | ||
| 45 | unsigned long mask = BITOP_MASK(nr); | ||
| 46 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 47 | |||
| 48 | *p ^= mask; | ||
| 49 | } | ||
| 50 | |||
| 51 | /** | ||
| 52 | * __test_and_set_bit - Set a bit and return its old value | ||
| 53 | * @nr: Bit to set | ||
| 54 | * @addr: Address to count from | ||
| 55 | * | ||
| 56 | * This operation is non-atomic and can be reordered. | ||
| 57 | * If two examples of this operation race, one can appear to succeed | ||
| 58 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 59 | */ | ||
| 60 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
| 61 | { | ||
| 62 | unsigned long mask = BITOP_MASK(nr); | ||
| 63 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 64 | unsigned long old = *p; | ||
| 65 | |||
| 66 | *p = old | mask; | ||
| 67 | return (old & mask) != 0; | ||
| 68 | } | ||
| 69 | |||
| 70 | /** | ||
| 71 | * __test_and_clear_bit - Clear a bit and return its old value | ||
| 72 | * @nr: Bit to clear | ||
| 73 | * @addr: Address to count from | ||
| 74 | * | ||
| 75 | * This operation is non-atomic and can be reordered. | ||
| 76 | * If two examples of this operation race, one can appear to succeed | ||
| 77 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 78 | */ | ||
| 79 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
| 80 | { | ||
| 81 | unsigned long mask = BITOP_MASK(nr); | ||
| 82 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 83 | unsigned long old = *p; | ||
| 84 | |||
| 85 | *p = old & ~mask; | ||
| 86 | return (old & mask) != 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | /* WARNING: non-atomic and it can be reordered! */ | ||
| 90 | static inline int __test_and_change_bit(int nr, | ||
| 91 | volatile unsigned long *addr) | ||
| 92 | { | ||
| 93 | unsigned long mask = BITOP_MASK(nr); | ||
| 94 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 95 | unsigned long old = *p; | ||
| 96 | |||
| 97 | *p = old ^ mask; | ||
| 98 | return (old & mask) != 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | /** | ||
| 102 | * test_bit - Determine whether a bit is set | ||
| 103 | * @nr: bit number to test | ||
| 104 | * @addr: Address to start counting from | ||
| 105 | */ | ||
| 106 | static inline int test_bit(int nr, const volatile unsigned long *addr) | ||
| 107 | { | ||
| 108 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
| 109 | } | ||
| 110 | |||
| 111 | #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ | ||
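The double-underscore variants are for bitmaps already protected by an outer lock or touched by only one context. A minimal sketch with a hypothetical lock and map:

    static DEFINE_SPINLOCK(map_lock);         /* hypothetical */
    static DECLARE_BITMAP(map, 128);

    static int grab_first_free(void)
    {
            int bit;

            spin_lock(&map_lock);
            bit = find_first_zero_bit(map, 128);
            if (bit < 128)
                    __set_bit(bit, map);      /* lock held, no atomics needed */
            spin_unlock(&map_lock);
            return bit;                       /* 128 means the map is full */
    }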
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h new file mode 100644 index 000000000000..5ef93a4d009f --- /dev/null +++ b/include/asm-generic/bitops/sched.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | #ifndef _ASM_GENERIC_BITOPS_SCHED_H_ | ||
| 2 | #define _ASM_GENERIC_BITOPS_SCHED_H_ | ||
| 3 | |||
| 4 | #include <linux/compiler.h> /* unlikely() */ | ||
| 5 | #include <asm/types.h> | ||
| 6 | |||
| 7 | /* | ||
| 8 | * Every architecture must define this function. It's the fastest | ||
| 9 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 10 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 11 | * bits is cleared. | ||
| 12 | */ | ||
| 13 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 14 | { | ||
| 15 | #if BITS_PER_LONG == 64 | ||
| 16 | if (unlikely(b[0])) | ||
| 17 | return __ffs(b[0]); | ||
| 18 | if (unlikely(b[1])) | ||
| 19 | return __ffs(b[1]) + 64; | ||
| 20 | return __ffs(b[2]) + 128; | ||
| 21 | #elif BITS_PER_LONG == 32 | ||
| 22 | if (unlikely(b[0])) | ||
| 23 | return __ffs(b[0]); | ||
| 24 | if (unlikely(b[1])) | ||
| 25 | return __ffs(b[1]) + 32; | ||
| 26 | if (unlikely(b[2])) | ||
| 27 | return __ffs(b[2]) + 64; | ||
| 28 | if (b[3]) | ||
| 29 | return __ffs(b[3]) + 96; | ||
| 30 | return __ffs(b[4]) + 128; | ||
| 31 | #else | ||
| 32 | #error BITS_PER_LONG not defined | ||
| 33 | #endif | ||
| 34 | } | ||
| 35 | |||
| 36 | #endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ | ||
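A worked example, assuming 32-bit longs so the 140-bit priority map spans five words (on 64-bit it spans three and the 64/128 offsets apply instead):

    /* b[0] == 0, b[1] == 0, b[2] == 0x00020000, b[3] and b[4] arbitrary:
     *   sched_find_first_bit(b) == __ffs(b[2]) + 64 == 17 + 64 == 81
     * i.e. index 81 is the first non-empty priority level.
     */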
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h index 3ae2c7347549..df893c160318 100644 --- a/include/asm-generic/futex.h +++ b/include/asm-generic/futex.h | |||
| @@ -49,5 +49,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
| 49 | return ret; | 49 | return ret; |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static inline int | ||
| 53 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 54 | { | ||
| 55 | return -ENOSYS; | ||
| 56 | } | ||
| 57 | |||
| 52 | #endif | 58 | #endif |
| 53 | #endif | 59 | #endif |
diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h new file mode 100644 index 000000000000..0cfb086dd373 --- /dev/null +++ b/include/asm-generic/memory_model.h | |||
| @@ -0,0 +1,77 @@ | |||
| 1 | #ifndef __ASM_MEMORY_MODEL_H | ||
| 2 | #define __ASM_MEMORY_MODEL_H | ||
| 3 | |||
| 4 | #ifdef __KERNEL__ | ||
| 5 | #ifndef __ASSEMBLY__ | ||
| 6 | |||
| 7 | #if defined(CONFIG_FLATMEM) | ||
| 8 | |||
| 9 | #ifndef ARCH_PFN_OFFSET | ||
| 10 | #define ARCH_PFN_OFFSET (0UL) | ||
| 11 | #endif | ||
| 12 | |||
| 13 | #elif defined(CONFIG_DISCONTIGMEM) | ||
| 14 | |||
| 15 | #ifndef arch_pfn_to_nid | ||
| 16 | #define arch_pfn_to_nid(pfn) pfn_to_nid(pfn) | ||
| 17 | #endif | ||
| 18 | |||
| 19 | #ifndef arch_local_page_offset | ||
| 20 | #define arch_local_page_offset(pfn, nid) \ | ||
| 21 | ((pfn) - NODE_DATA(nid)->node_start_pfn) | ||
| 22 | #endif | ||
| 23 | |||
| 24 | #endif /* CONFIG_DISCONTIGMEM */ | ||
| 25 | |||
| 26 | #ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE | ||
| 27 | struct page; | ||
| 28 | /* this is useful when inlined pfn_to_page is too big */ | ||
| 29 | extern struct page *pfn_to_page(unsigned long pfn); | ||
| 30 | extern unsigned long page_to_pfn(struct page *page); | ||
| 31 | #else | ||
| 32 | /* | ||
| 33 | * supports 3 memory models. | ||
| 34 | */ | ||
| 35 | #if defined(CONFIG_FLATMEM) | ||
| 36 | |||
| 37 | #define pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET)) | ||
| 38 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + \ | ||
| 39 | ARCH_PFN_OFFSET) | ||
| 40 | #elif defined(CONFIG_DISCONTIGMEM) | ||
| 41 | |||
| 42 | #define pfn_to_page(pfn) \ | ||
| 43 | ({ unsigned long __pfn = (pfn); \ | ||
| 44 | unsigned long __nid = arch_pfn_to_nid(pfn); \ | ||
| 45 | NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\ | ||
| 46 | }) | ||
| 47 | |||
| 48 | #define page_to_pfn(pg) \ | ||
| 49 | ({ struct page *__pg = (pg); \ | ||
| 50 | struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \ | ||
| 51 | (unsigned long)(__pg - __pgdat->node_mem_map) + \ | ||
| 52 | __pgdat->node_start_pfn; \ | ||
| 53 | }) | ||
| 54 | |||
| 55 | #elif defined(CONFIG_SPARSEMEM) | ||
| 56 | /* | ||
| 57 | * Note: section's mem_map is encoded to reflect its start_pfn. | ||
| 58 | * section[i].section_mem_map == mem_map's address - start_pfn; | ||
| 59 | */ | ||
| 60 | #define page_to_pfn(pg) \ | ||
| 61 | ({ struct page *__pg = (pg); \ | ||
| 62 | int __sec = page_to_section(__pg); \ | ||
| 63 | __pg - __section_mem_map_addr(__nr_to_section(__sec)); \ | ||
| 64 | }) | ||
| 65 | |||
| 66 | #define pfn_to_page(pfn) \ | ||
| 67 | ({ unsigned long __pfn = (pfn); \ | ||
| 68 | struct mem_section *__sec = __pfn_to_section(__pfn); \ | ||
| 69 | __section_mem_map_addr(__sec) + __pfn; \ | ||
| 70 | }) | ||
| 71 | #endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */ | ||
| 72 | #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */ | ||
| 73 | |||
| 74 | #endif /* __ASSEMBLY__ */ | ||
| 75 | #endif /* __KERNEL__ */ | ||
| 76 | |||
| 77 | #endif | ||
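For the FLATMEM case, which is what the frv and h8300 page.h conversions above rely on, mem_map[0] now corresponds to pfn ARCH_PFN_OFFSET rather than pfn 0, and the two macros stay exact inverses:

    /* Assuming CONFIG_FLATMEM:
     *   pfn_to_page(pfn)  == mem_map + (pfn - ARCH_PFN_OFFSET)
     *   page_to_pfn(page) == (page - mem_map) + ARCH_PFN_OFFSET
     * so page_to_pfn(pfn_to_page(pfn)) == pfn for every valid pfn,
     * matching the per-architecture definitions removed above.
     */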
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h index ff7c2b721594..574f57b6c4d1 100644 --- a/include/asm-h8300/bitops.h +++ b/include/asm-h8300/bitops.h | |||
| @@ -8,7 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
| 10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
| 11 | #include <asm/byteorder.h> /* swab32 */ | ||
| 12 | #include <asm/system.h> | 11 | #include <asm/system.h> |
| 13 | 12 | ||
| 14 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
| @@ -177,10 +176,7 @@ H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") | |||
| 177 | #undef H8300_GEN_TEST_BITOP_CONST_INT | 176 | #undef H8300_GEN_TEST_BITOP_CONST_INT |
| 178 | #undef H8300_GEN_TEST_BITOP | 177 | #undef H8300_GEN_TEST_BITOP |
| 179 | 178 | ||
| 180 | #define find_first_zero_bit(addr, size) \ | 179 | #include <asm-generic/bitops/ffs.h> |
| 181 | find_next_zero_bit((addr), (size), 0) | ||
| 182 | |||
| 183 | #define ffs(x) generic_ffs(x) | ||
| 184 | 180 | ||
| 185 | static __inline__ unsigned long __ffs(unsigned long word) | 181 | static __inline__ unsigned long __ffs(unsigned long word) |
| 186 | { | 182 | { |
| @@ -196,216 +192,16 @@ static __inline__ unsigned long __ffs(unsigned long word) | |||
| 196 | return result; | 192 | return result; |
| 197 | } | 193 | } |
| 198 | 194 | ||
| 199 | static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset) | 195 | #include <asm-generic/bitops/find.h> |
| 200 | { | 196 | #include <asm-generic/bitops/sched.h> |
| 201 | unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); | 197 | #include <asm-generic/bitops/hweight.h> |
| 202 | unsigned long result = offset & ~31UL; | 198 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 203 | unsigned long tmp; | 199 | #include <asm-generic/bitops/ext2-atomic.h> |
| 204 | 200 | #include <asm-generic/bitops/minix.h> | |
| 205 | if (offset >= size) | ||
| 206 | return size; | ||
| 207 | size -= result; | ||
| 208 | offset &= 31UL; | ||
| 209 | if (offset) { | ||
| 210 | tmp = *(p++); | ||
| 211 | tmp |= ~0UL >> (32-offset); | ||
| 212 | if (size < 32) | ||
| 213 | goto found_first; | ||
| 214 | if (~tmp) | ||
| 215 | goto found_middle; | ||
| 216 | size -= 32; | ||
| 217 | result += 32; | ||
| 218 | } | ||
| 219 | while (size & ~31UL) { | ||
| 220 | if (~(tmp = *(p++))) | ||
| 221 | goto found_middle; | ||
| 222 | result += 32; | ||
| 223 | size -= 32; | ||
| 224 | } | ||
| 225 | if (!size) | ||
| 226 | return result; | ||
| 227 | tmp = *p; | ||
| 228 | |||
| 229 | found_first: | ||
| 230 | tmp |= ~0UL << size; | ||
| 231 | found_middle: | ||
| 232 | return result + ffz(tmp); | ||
| 233 | } | ||
| 234 | |||
| 235 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
| 236 | unsigned long size, unsigned long offset) | ||
| 237 | { | ||
| 238 | unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); | ||
| 239 | unsigned int result = offset & ~31UL; | ||
| 240 | unsigned int tmp; | ||
| 241 | |||
| 242 | if (offset >= size) | ||
| 243 | return size; | ||
| 244 | size -= result; | ||
| 245 | offset &= 31UL; | ||
| 246 | if (offset) { | ||
| 247 | tmp = *(p++); | ||
| 248 | tmp &= ~0UL << offset; | ||
| 249 | if (size < 32) | ||
| 250 | goto found_first; | ||
| 251 | if (tmp) | ||
| 252 | goto found_middle; | ||
| 253 | size -= 32; | ||
| 254 | result += 32; | ||
| 255 | } | ||
| 256 | while (size >= 32) { | ||
| 257 | if ((tmp = *p++) != 0) | ||
| 258 | goto found_middle; | ||
| 259 | result += 32; | ||
| 260 | size -= 32; | ||
| 261 | } | ||
| 262 | if (!size) | ||
| 263 | return result; | ||
| 264 | tmp = *p; | ||
| 265 | |||
| 266 | found_first: | ||
| 267 | tmp &= ~0UL >> (32 - size); | ||
| 268 | if (tmp == 0UL) | ||
| 269 | return result + size; | ||
| 270 | found_middle: | ||
| 271 | return result + __ffs(tmp); | ||
| 272 | } | ||
| 273 | |||
| 274 | #define find_first_bit(addr, size) find_next_bit(addr, size, 0) | ||
| 275 | |||
| 276 | /* | ||
| 277 | * Every architecture must define this function. It's the fastest | ||
| 278 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 279 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 280 | * bits is cleared. | ||
| 281 | */ | ||
| 282 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 283 | { | ||
| 284 | if (unlikely(b[0])) | ||
| 285 | return __ffs(b[0]); | ||
| 286 | if (unlikely(b[1])) | ||
| 287 | return __ffs(b[1]) + 32; | ||
| 288 | if (unlikely(b[2])) | ||
| 289 | return __ffs(b[2]) + 64; | ||
| 290 | if (b[3]) | ||
| 291 | return __ffs(b[3]) + 96; | ||
| 292 | return __ffs(b[4]) + 128; | ||
| 293 | } | ||
| 294 | |||
| 295 | /* | ||
| 296 | * hweightN: returns the hamming weight (i.e. the number | ||
| 297 | * of bits set) of a N-bit word | ||
| 298 | */ | ||
| 299 | |||
| 300 | #define hweight32(x) generic_hweight32(x) | ||
| 301 | #define hweight16(x) generic_hweight16(x) | ||
| 302 | #define hweight8(x) generic_hweight8(x) | ||
| 303 | |||
| 304 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
| 305 | { | ||
| 306 | int mask, retval; | ||
| 307 | unsigned long flags; | ||
| 308 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 309 | |||
| 310 | ADDR += nr >> 3; | ||
| 311 | mask = 1 << (nr & 0x07); | ||
| 312 | local_irq_save(flags); | ||
| 313 | retval = (mask & *ADDR) != 0; | ||
| 314 | *ADDR |= mask; | ||
| 315 | local_irq_restore(flags); | ||
| 316 | return retval; | ||
| 317 | } | ||
| 318 | #define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) | ||
| 319 | |||
| 320 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
| 321 | { | ||
| 322 | int mask, retval; | ||
| 323 | unsigned long flags; | ||
| 324 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 325 | |||
| 326 | ADDR += nr >> 3; | ||
| 327 | mask = 1 << (nr & 0x07); | ||
| 328 | local_irq_save(flags); | ||
| 329 | retval = (mask & *ADDR) != 0; | ||
| 330 | *ADDR &= ~mask; | ||
| 331 | local_irq_restore(flags); | ||
| 332 | return retval; | ||
| 333 | } | ||
| 334 | #define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) | ||
| 335 | |||
| 336 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
| 337 | { | ||
| 338 | int mask; | ||
| 339 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
| 340 | |||
| 341 | ADDR += nr >> 3; | ||
| 342 | mask = 1 << (nr & 0x07); | ||
| 343 | return ((mask & *ADDR) != 0); | ||
| 344 | } | ||
| 345 | |||
| 346 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 347 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 348 | |||
| 349 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
| 350 | { | ||
| 351 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 352 | unsigned long result = offset & ~31UL; | ||
| 353 | unsigned long tmp; | ||
| 354 | |||
| 355 | if (offset >= size) | ||
| 356 | return size; | ||
| 357 | size -= result; | ||
| 358 | offset &= 31UL; | ||
| 359 | if(offset) { | ||
| 360 | /* We hold the little endian value in tmp, but then the | ||
| 361 | * shift is illegal. So we could keep a big endian value | ||
| 362 | * in tmp, like this: | ||
| 363 | * | ||
| 364 | * tmp = __swab32(*(p++)); | ||
| 365 | * tmp |= ~0UL >> (32-offset); | ||
| 366 | * | ||
| 367 | * but this would decrease performance, so we change the | ||
| 368 | * shift: | ||
| 369 | */ | ||
| 370 | tmp = *(p++); | ||
| 371 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 372 | if(size < 32) | ||
| 373 | goto found_first; | ||
| 374 | if(~tmp) | ||
| 375 | goto found_middle; | ||
| 376 | size -= 32; | ||
| 377 | result += 32; | ||
| 378 | } | ||
| 379 | while(size & ~31UL) { | ||
| 380 | if(~(tmp = *(p++))) | ||
| 381 | goto found_middle; | ||
| 382 | result += 32; | ||
| 383 | size -= 32; | ||
| 384 | } | ||
| 385 | if(!size) | ||
| 386 | return result; | ||
| 387 | tmp = *p; | ||
| 388 | |||
| 389 | found_first: | ||
| 390 | /* tmp is little endian, so we would have to swab the shift, | ||
| 391 | * see above. But then we have to swab tmp below for ffz, so | ||
| 392 | * we might as well do this here. | ||
| 393 | */ | ||
| 394 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 395 | found_middle: | ||
| 396 | return result + ffz(__swab32(tmp)); | ||
| 397 | } | ||
| 398 | |||
| 399 | /* Bitmap functions for the minix filesystem. */ | ||
| 400 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 401 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 402 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 403 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 404 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 405 | 201 | ||
| 406 | #endif /* __KERNEL__ */ | 202 | #endif /* __KERNEL__ */ |
| 407 | 203 | ||
| 408 | #define fls(x) generic_fls(x) | 204 | #include <asm-generic/bitops/fls.h> |
| 409 | #define fls64(x) generic_fls64(x) | 205 | #include <asm-generic/bitops/fls64.h> |
| 410 | 206 | ||
| 411 | #endif /* _H8300_BITOPS_H */ | 207 | #endif /* _H8300_BITOPS_H */ |
diff --git a/include/asm-h8300/page.h b/include/asm-h8300/page.h index cd35b1cc6cde..6472c9f88227 100644 --- a/include/asm-h8300/page.h +++ b/include/asm-h8300/page.h | |||
| @@ -71,8 +71,7 @@ extern unsigned long memory_end; | |||
| 71 | #define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) | 71 | #define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) |
| 72 | #define pfn_valid(page) (page < max_mapnr) | 72 | #define pfn_valid(page) (page < max_mapnr) |
| 73 | 73 | ||
| 74 | #define pfn_to_page(pfn) virt_to_page(pfn_to_virt(pfn)) | 74 | #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) |
| 75 | #define page_to_pfn(page) virt_to_pfn(page_to_virt(page)) | ||
| 76 | 75 | ||
| 77 | #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ | 76 | #define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \ |
| 78 | ((void *)(kaddr) < (void *)memory_end)) | 77 | ((void *)(kaddr) < (void *)memory_end)) |
| @@ -81,6 +80,7 @@ extern unsigned long memory_end; | |||
| 81 | 80 | ||
| 82 | #endif /* __KERNEL__ */ | 81 | #endif /* __KERNEL__ */ |
| 83 | 82 | ||
| 83 | #include <asm-generic/memory_model.h> | ||
| 84 | #include <asm-generic/page.h> | 84 | #include <asm-generic/page.h> |
| 85 | 85 | ||
| 86 | #endif /* _H8300_PAGE_H */ | 86 | #endif /* _H8300_PAGE_H */ |
diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h index bf91e0d4dde7..da2402b86540 100644 --- a/include/asm-h8300/types.h +++ b/include/asm-h8300/types.h | |||
| @@ -58,6 +58,9 @@ typedef u32 dma_addr_t; | |||
| 58 | #define HAVE_SECTOR_T | 58 | #define HAVE_SECTOR_T |
| 59 | typedef u64 sector_t; | 59 | typedef u64 sector_t; |
| 60 | 60 | ||
| 61 | #define HAVE_BLKCNT_T | ||
| 62 | typedef u64 blkcnt_t; | ||
| 63 | |||
| 61 | #endif /* __KERNEL__ */ | 64 | #endif /* __KERNEL__ */ |
| 62 | 65 | ||
| 63 | #endif /* __ASSEMBLY__ */ | 66 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 7d20b95edb3b..08deaeee6be9 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h | |||
| @@ -362,28 +362,9 @@ static inline unsigned long ffz(unsigned long word) | |||
| 362 | return word; | 362 | return word; |
| 363 | } | 363 | } |
| 364 | 364 | ||
| 365 | #define fls64(x) generic_fls64(x) | ||
| 366 | |||
| 367 | #ifdef __KERNEL__ | 365 | #ifdef __KERNEL__ |
| 368 | 366 | ||
| 369 | /* | 367 | #include <asm-generic/bitops/sched.h> |
| 370 | * Every architecture must define this function. It's the fastest | ||
| 371 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 372 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 373 | * bits is cleared. | ||
| 374 | */ | ||
| 375 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 376 | { | ||
| 377 | if (unlikely(b[0])) | ||
| 378 | return __ffs(b[0]); | ||
| 379 | if (unlikely(b[1])) | ||
| 380 | return __ffs(b[1]) + 32; | ||
| 381 | if (unlikely(b[2])) | ||
| 382 | return __ffs(b[2]) + 64; | ||
| 383 | if (b[3]) | ||
| 384 | return __ffs(b[3]) + 96; | ||
| 385 | return __ffs(b[4]) + 128; | ||
| 386 | } | ||
| 387 | 368 | ||
| 388 | /** | 369 | /** |
| 389 | * ffs - find first bit set | 370 | * ffs - find first bit set |
| @@ -421,42 +402,22 @@ static inline int fls(int x) | |||
| 421 | return r+1; | 402 | return r+1; |
| 422 | } | 403 | } |
| 423 | 404 | ||
| 424 | /** | 405 | #include <asm-generic/bitops/hweight.h> |
| 425 | * hweightN - returns the hamming weight of a N-bit word | ||
| 426 | * @x: the word to weigh | ||
| 427 | * | ||
| 428 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 429 | */ | ||
| 430 | |||
| 431 | #define hweight32(x) generic_hweight32(x) | ||
| 432 | #define hweight16(x) generic_hweight16(x) | ||
| 433 | #define hweight8(x) generic_hweight8(x) | ||
| 434 | 406 | ||
| 435 | #endif /* __KERNEL__ */ | 407 | #endif /* __KERNEL__ */ |
| 436 | 408 | ||
| 409 | #include <asm-generic/bitops/fls64.h> | ||
| 410 | |||
| 437 | #ifdef __KERNEL__ | 411 | #ifdef __KERNEL__ |
| 438 | 412 | ||
| 439 | #define ext2_set_bit(nr,addr) \ | 413 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 440 | __test_and_set_bit((nr),(unsigned long*)addr) | 414 | |
| 441 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 415 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
| 442 | test_and_set_bit((nr),(unsigned long*)addr) | 416 | test_and_set_bit((nr),(unsigned long*)addr) |
| 443 | #define ext2_clear_bit(nr, addr) \ | ||
| 444 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
| 445 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | 417 | #define ext2_clear_bit_atomic(lock,nr, addr) \ |
| 446 | test_and_clear_bit((nr),(unsigned long*)addr) | 418 | test_and_clear_bit((nr),(unsigned long*)addr) |
| 447 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | 419 | |
| 448 | #define ext2_find_first_zero_bit(addr, size) \ | 420 | #include <asm-generic/bitops/minix.h> |
| 449 | find_first_zero_bit((unsigned long*)addr, size) | ||
| 450 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 451 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
| 452 | |||
| 453 | /* Bitmap functions for the minix filesystem. */ | ||
| 454 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
| 455 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
| 456 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
| 457 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
| 458 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 459 | find_first_zero_bit((void*)addr,size) | ||
| 460 | 421 | ||
| 461 | #endif /* __KERNEL__ */ | 422 | #endif /* __KERNEL__ */ |
| 462 | 423 | ||
diff --git a/include/asm-i386/futex.h b/include/asm-i386/futex.h index 44b9db806474..7b8ceefd010f 100644 --- a/include/asm-i386/futex.h +++ b/include/asm-i386/futex.h | |||
| @@ -104,5 +104,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
| 104 | return ret; | 104 | return ret; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline int | ||
| 108 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 109 | { | ||
| 110 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
| 111 | return -EFAULT; | ||
| 112 | |||
| 113 | __asm__ __volatile__( | ||
| 114 | "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" | ||
| 115 | |||
| 116 | "2: .section .fixup, \"ax\" \n" | ||
| 117 | "3: mov %2, %0 \n" | ||
| 118 | " jmp 2b \n" | ||
| 119 | " .previous \n" | ||
| 120 | |||
| 121 | " .section __ex_table, \"a\" \n" | ||
| 122 | " .align 8 \n" | ||
| 123 | " .long 1b,3b \n" | ||
| 124 | " .previous \n" | ||
| 125 | |||
| 126 | : "=a" (oldval), "=m" (*uaddr) | ||
| 127 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
| 128 | : "memory" | ||
| 129 | ); | ||
| 130 | |||
| 131 | return oldval; | ||
| 132 | } | ||
| 133 | |||
| 107 | #endif | 134 | #endif |
| 108 | #endif | 135 | #endif |
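The i386 version returns the value previously stored at *uaddr (or -EFAULT on a fault), so a caller detects a successful exchange by comparing the return with the expected old value; the asm-generic stub instead reports -ENOSYS so common code can fall back. A hypothetical caller sketch (uaddr, expected and desired are assumed variables):

    int cur = futex_atomic_cmpxchg_inatomic(uaddr, expected, desired);

    if (cur == -EFAULT) {
            /* faulted on the user address */
    } else if (cur == -ENOSYS) {
            /* this architecture has no atomic cmpxchg for futexes */
    } else if (cur == expected) {
            /* *uaddr was atomically replaced with 'desired' */
    }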
diff --git a/include/asm-i386/kdebug.h b/include/asm-i386/kdebug.h index 316138e89910..96d0828ce096 100644 --- a/include/asm-i386/kdebug.h +++ b/include/asm-i386/kdebug.h | |||
| @@ -17,11 +17,9 @@ struct die_args { | |||
| 17 | int signr; | 17 | int signr; |
| 18 | }; | 18 | }; |
| 19 | 19 | ||
| 20 | /* Note - you should never unregister because that can race with NMIs. | 20 | extern int register_die_notifier(struct notifier_block *); |
| 21 | If you really want to do it first unregister - then synchronize_sched - then free. | 21 | extern int unregister_die_notifier(struct notifier_block *); |
| 22 | */ | 22 | extern struct atomic_notifier_head i386die_chain; |
| 23 | int register_die_notifier(struct notifier_block *nb); | ||
| 24 | extern struct notifier_block *i386die_chain; | ||
| 25 | 23 | ||
| 26 | 24 | ||
| 27 | /* Grossly misnamed. */ | 25 | /* Grossly misnamed. */ |
| @@ -51,7 +49,7 @@ static inline int notify_die(enum die_val val, const char *str, | |||
| 51 | .trapnr = trap, | 49 | .trapnr = trap, |
| 52 | .signr = sig | 50 | .signr = sig |
| 53 | }; | 51 | }; |
| 54 | return notifier_call_chain(&i386die_chain, val, &args); | 52 | return atomic_notifier_call_chain(&i386die_chain, val, &args); |
| 55 | } | 53 | } |
| 56 | 54 | ||
| 57 | #endif | 55 | #endif |
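With i386die_chain converted to an atomic_notifier_head, consumers keep the registration interface declared above. A rough sketch of a consumer module (names such as sample_die_event are invented for illustration; only register_die_notifier(), unregister_die_notifier(), struct die_args and the standard notifier callback signature are taken from the headers):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Illustrative consumer: log die events; not part of this patch. */
static int sample_die_event(struct notifier_block *self,
                            unsigned long val, void *data)
{
        struct die_args *args = data;

        printk(KERN_INFO "die event %lu, trap %d\n", val, args->trapnr);
        return NOTIFY_DONE;
}

static struct notifier_block sample_die_nb = {
        .notifier_call = sample_die_event,
};

static int __init sample_init(void)
{
        return register_die_notifier(&sample_die_nb);
}

static void __exit sample_exit(void)
{
        unregister_die_notifier(&sample_die_nb);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");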
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index a0d2d74a7dda..57d157c5cf89 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h | |||
| @@ -34,6 +34,7 @@ struct pt_regs; | |||
| 34 | 34 | ||
| 35 | typedef u8 kprobe_opcode_t; | 35 | typedef u8 kprobe_opcode_t; |
| 36 | #define BREAKPOINT_INSTRUCTION 0xcc | 36 | #define BREAKPOINT_INSTRUCTION 0xcc |
| 37 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | ||
| 37 | #define MAX_INSN_SIZE 16 | 38 | #define MAX_INSN_SIZE 16 |
| 38 | #define MAX_STACK_SIZE 64 | 39 | #define MAX_STACK_SIZE 64 |
| 39 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | 40 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ |
| @@ -51,6 +52,11 @@ void kretprobe_trampoline(void); | |||
| 51 | struct arch_specific_insn { | 52 | struct arch_specific_insn { |
| 52 | /* copy of the original instruction */ | 53 | /* copy of the original instruction */ |
| 53 | kprobe_opcode_t *insn; | 54 | kprobe_opcode_t *insn; |
| 55 | /* | ||
| 56 | * If this flag is not 0, this kprobe can be boosted when its | ||
| 57 | * post_handler and break_handler are not set. | ||
| 58 | */ | ||
| 59 | int boostable; | ||
| 54 | }; | 60 | }; |
| 55 | 61 | ||
| 56 | struct prev_kprobe { | 62 | struct prev_kprobe { |
diff --git a/include/asm-i386/mmzone.h b/include/asm-i386/mmzone.h index 74f595d80579..e33e9f9e4c66 100644 --- a/include/asm-i386/mmzone.h +++ b/include/asm-i386/mmzone.h | |||
| @@ -70,8 +70,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
| 70 | #endif | 70 | #endif |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | #define node_localnr(pfn, nid) ((pfn) - node_data[nid]->node_start_pfn) | ||
| 74 | |||
| 75 | /* | 73 | /* |
| 76 | * Following are macros that each numa implementation must define. | 74 | * Following are macros that each numa implementation must define.
| 77 | */ | 75 | */ |
| @@ -86,21 +84,6 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
| 86 | /* XXX: FIXME -- wli */ | 84 | /* XXX: FIXME -- wli */ |
| 87 | #define kern_addr_valid(kaddr) (0) | 85 | #define kern_addr_valid(kaddr) (0) |
| 88 | 86 | ||
| 89 | #define pfn_to_page(pfn) \ | ||
| 90 | ({ \ | ||
| 91 | unsigned long __pfn = pfn; \ | ||
| 92 | int __node = pfn_to_nid(__pfn); \ | ||
| 93 | &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ | ||
| 94 | }) | ||
| 95 | |||
| 96 | #define page_to_pfn(pg) \ | ||
| 97 | ({ \ | ||
| 98 | struct page *__page = pg; \ | ||
| 99 | struct zone *__zone = page_zone(__page); \ | ||
| 100 | (unsigned long)(__page - __zone->zone_mem_map) \ | ||
| 101 | + __zone->zone_start_pfn; \ | ||
| 102 | }) | ||
| 103 | |||
| 104 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ | 87 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ |
| 105 | #define pfn_valid(pfn) ((pfn) < num_physpages) | 88 | #define pfn_valid(pfn) ((pfn) < num_physpages) |
| 106 | #else | 89 | #else |
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h index 997ca5d17876..30f52a2263ba 100644 --- a/include/asm-i386/page.h +++ b/include/asm-i386/page.h | |||
| @@ -126,8 +126,6 @@ extern int page_is_ram(unsigned long pagenr); | |||
| 126 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 126 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
| 127 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 127 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
| 128 | #ifdef CONFIG_FLATMEM | 128 | #ifdef CONFIG_FLATMEM |
| 129 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 130 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 131 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 129 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 132 | #endif /* CONFIG_FLATMEM */ | 130 | #endif /* CONFIG_FLATMEM */ |
| 133 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 131 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| @@ -141,6 +139,7 @@ extern int page_is_ram(unsigned long pagenr); | |||
| 141 | 139 | ||
| 142 | #endif /* __KERNEL__ */ | 140 | #endif /* __KERNEL__ */ |
| 143 | 141 | ||
| 142 | #include <asm-generic/memory_model.h> | ||
| 144 | #include <asm-generic/page.h> | 143 | #include <asm-generic/page.h> |
| 145 | 144 | ||
| 146 | #endif /* _I386_PAGE_H */ | 145 | #endif /* _I386_PAGE_H */ |
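The open-coded FLATMEM conversions removed here express a plain linear mapping that the new <asm-generic/memory_model.h> include now supplies centrally, offset by ARCH_PFN_OFFSET where an architecture defines one (m32r does so later in this patch; on i386 the offset is effectively zero). A stand-alone sketch of that arithmetic, with a toy fake_mem_map array standing in for mem_map and the offset assumed zero:

#include <stdio.h>

struct page { int dummy; };             /* toy stand-in for struct page */

#define NR_PAGES        16
#define ARCH_PFN_OFFSET 0UL             /* assumed 0, as on flat i386 */

static struct page fake_mem_map[NR_PAGES];

/* Linear FLATMEM mapping: pfn <-> slot in the mem_map array. */
static struct page *toy_pfn_to_page(unsigned long pfn)
{
        return fake_mem_map + (pfn - ARCH_PFN_OFFSET);
}

static unsigned long toy_page_to_pfn(struct page *page)
{
        return (unsigned long)(page - fake_mem_map) + ARCH_PFN_OFFSET;
}

int main(void)
{
        unsigned long pfn = 5;
        struct page *pg = toy_pfn_to_page(pfn);

        printf("pfn %lu -> slot %ld -> pfn %lu\n",
               pfn, (long)(pg - fake_mem_map), toy_page_to_pfn(pg));
        return 0;
}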
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h index feca5d961e2b..af4bfd012475 100644 --- a/include/asm-i386/processor.h +++ b/include/asm-i386/processor.h | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/config.h> | 20 | #include <linux/config.h> |
| 21 | #include <linux/threads.h> | 21 | #include <linux/threads.h> |
| 22 | #include <asm/percpu.h> | 22 | #include <asm/percpu.h> |
| 23 | #include <linux/cpumask.h> | ||
| 23 | 24 | ||
| 24 | /* flag for disabling the tsc */ | 25 | /* flag for disabling the tsc */ |
| 25 | extern int tsc_disable; | 26 | extern int tsc_disable; |
| @@ -67,6 +68,9 @@ struct cpuinfo_x86 { | |||
| 67 | char pad0; | 68 | char pad0; |
| 68 | int x86_power; | 69 | int x86_power; |
| 69 | unsigned long loops_per_jiffy; | 70 | unsigned long loops_per_jiffy; |
| 71 | #ifdef CONFIG_SMP | ||
| 72 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
| 73 | #endif | ||
| 70 | unsigned char x86_max_cores; /* cpuid returned max cores value */ | 74 | unsigned char x86_max_cores; /* cpuid returned max cores value */ |
| 71 | unsigned char booted_cores; /* number of cores as seen by OS */ | 75 | unsigned char booted_cores; /* number of cores as seen by OS */ |
| 72 | unsigned char apicid; | 76 | unsigned char apicid; |
| @@ -103,6 +107,7 @@ extern struct cpuinfo_x86 cpu_data[]; | |||
| 103 | 107 | ||
| 104 | extern int phys_proc_id[NR_CPUS]; | 108 | extern int phys_proc_id[NR_CPUS]; |
| 105 | extern int cpu_core_id[NR_CPUS]; | 109 | extern int cpu_core_id[NR_CPUS]; |
| 110 | extern int cpu_llc_id[NR_CPUS]; | ||
| 106 | extern char ignore_fpu_irq; | 111 | extern char ignore_fpu_irq; |
| 107 | 112 | ||
| 108 | extern void identify_cpu(struct cpuinfo_x86 *); | 113 | extern void identify_cpu(struct cpuinfo_x86 *); |
diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index 826a8ca50ac8..ee941457b55d 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h | |||
| @@ -6,9 +6,7 @@ | |||
| 6 | #ifndef _i386_SETUP_H | 6 | #ifndef _i386_SETUP_H |
| 7 | #define _i386_SETUP_H | 7 | #define _i386_SETUP_H |
| 8 | 8 | ||
| 9 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | 9 | #include <linux/pfn.h> |
| 10 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
| 11 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
| 12 | 10 | ||
| 13 | /* | 11 | /* |
| 14 | * Reserved space for vmalloc and iomap - defined in asm/page.h | 12 | * Reserved space for vmalloc and iomap - defined in asm/page.h |
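The three PFN_* helpers dropped from this header keep the same definitions in <linux/pfn.h>: round a byte address up or down to a page frame number, or turn a frame number back into a physical address. Their behaviour, assuming the usual 4 KiB i386 page size:

#include <stdio.h>

#define PAGE_SHIFT 12                           /* 4 KiB pages assumed */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)  /* round up    */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)                     /* round down  */
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)                     /* pfn -> phys */

int main(void)
{
        unsigned long addr = 0x1234;

        printf("PFN_UP(0x%lx)   = %lu\n", addr, PFN_UP(addr));   /* 2      */
        printf("PFN_DOWN(0x%lx) = %lu\n", addr, PFN_DOWN(addr)); /* 1      */
        printf("PFN_PHYS(3)     = 0x%lx\n", PFN_PHYS(3UL));      /* 0x3000 */
        return 0;
}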
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h index b464f8020ec4..67eae78323ba 100644 --- a/include/asm-i386/stat.h +++ b/include/asm-i386/stat.h | |||
| @@ -58,8 +58,7 @@ struct stat64 { | |||
| 58 | long long st_size; | 58 | long long st_size; |
| 59 | unsigned long st_blksize; | 59 | unsigned long st_blksize; |
| 60 | 60 | ||
| 61 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | 61 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
| 62 | unsigned long __pad4; /* future possible st_blocks high bits */ | ||
| 63 | 62 | ||
| 64 | unsigned long st_atime; | 63 | unsigned long st_atime; |
| 65 | unsigned long st_atime_nsec; | 64 | unsigned long st_atime_nsec; |
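Widening st_blocks to unsigned long long simply absorbs the adjacent __pad4 word that had been reserved for its high bits: a 32-bit count of 512-byte blocks tops out just under 2 TiB. A quick back-of-the-envelope check of that limit:

#include <stdio.h>

int main(void)
{
        /* st_blocks counts 512-byte units. */
        unsigned long long max32_blocks = 0xffffffffULL;
        unsigned long long max_bytes = max32_blocks * 512;

        /* ~2 TiB is the ceiling a 32-bit st_blocks can describe. */
        printf("32-bit st_blocks limit: %llu bytes (~%llu GiB)\n",
               max_bytes, max_bytes >> 30);
        return 0;
}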
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h index aa958c6ee83e..b94e5eeef917 100644 --- a/include/asm-i386/topology.h +++ b/include/asm-i386/topology.h | |||
| @@ -112,4 +112,6 @@ extern unsigned long node_remap_size[]; | |||
| 112 | 112 | ||
| 113 | #endif /* CONFIG_NUMA */ | 113 | #endif /* CONFIG_NUMA */ |
| 114 | 114 | ||
| 115 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
| 116 | |||
| 115 | #endif /* _ASM_I386_TOPOLOGY_H */ | 117 | #endif /* _ASM_I386_TOPOLOGY_H */ |
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h index ced00fe8fe61..e50a08bd7ced 100644 --- a/include/asm-i386/types.h +++ b/include/asm-i386/types.h | |||
| @@ -63,6 +63,11 @@ typedef u64 sector_t; | |||
| 63 | #define HAVE_SECTOR_T | 63 | #define HAVE_SECTOR_T |
| 64 | #endif | 64 | #endif |
| 65 | 65 | ||
| 66 | #ifdef CONFIG_LSF | ||
| 67 | typedef u64 blkcnt_t; | ||
| 68 | #define HAVE_BLKCNT_T | ||
| 69 | #endif | ||
| 70 | |||
| 66 | #endif /* __ASSEMBLY__ */ | 71 | #endif /* __ASSEMBLY__ */ |
| 67 | 72 | ||
| 68 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-i386/unistd.h b/include/asm-i386/unistd.h index d8afd0e3b81a..014e3562895b 100644 --- a/include/asm-i386/unistd.h +++ b/include/asm-i386/unistd.h | |||
| @@ -316,8 +316,10 @@ | |||
| 316 | #define __NR_pselect6 308 | 316 | #define __NR_pselect6 308 |
| 317 | #define __NR_ppoll 309 | 317 | #define __NR_ppoll 309 |
| 318 | #define __NR_unshare 310 | 318 | #define __NR_unshare 310 |
| 319 | #define __NR_set_robust_list 311 | ||
| 320 | #define __NR_get_robust_list 312 | ||
| 319 | 321 | ||
| 320 | #define NR_syscalls 311 | 322 | #define NR_syscalls 313 |
| 321 | 323 | ||
| 322 | /* | 324 | /* |
| 323 | * user-visible error numbers are in the range -1 - -128: see | 325 | * user-visible error numbers are in the range -1 - -128: see |
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 36d0fb95ea89..90921e162793 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h | |||
| @@ -5,8 +5,8 @@ | |||
| 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
| 6 | * David Mosberger-Tang <davidm@hpl.hp.com> | 6 | * David Mosberger-Tang <davidm@hpl.hp.com> |
| 7 | * | 7 | * |
| 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1) | 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 |
| 9 | * scheduler patch | 9 | * O(1) scheduler patch |
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
| @@ -25,9 +25,9 @@ | |||
| 25 | * restricted to acting on a single-word quantity. | 25 | * restricted to acting on a single-word quantity. |
| 26 | * | 26 | * |
| 27 | * The address must be (at least) "long" aligned. | 27 | * The address must be (at least) "long" aligned. |
| 28 | * Note that there are drivers (e.g., eepro100) which use these operations to operate on | 28 | * Note that there are drivers (e.g., eepro100) which use these operations to |
| 29 | * hw-defined data-structures, so we can't easily change these operations to force a | 29 | * operate on hw-defined data-structures, so we can't easily change these |
| 30 | * bigger alignment. | 30 | * operations to force a bigger alignment. |
| 31 | * | 31 | * |
| 32 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 32 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
| 33 | */ | 33 | */ |
| @@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr) | |||
| 284 | * ffz - find the first zero bit in a long word | 284 | * ffz - find the first zero bit in a long word |
| 285 | * @x: The long word to find the bit in | 285 | * @x: The long word to find the bit in |
| 286 | * | 286 | * |
| 287 | * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if | 287 | * Returns the bit-number (0..63) of the first (least significant) zero bit. |
| 288 | * no zero exists, so code should check against ~0UL first... | 288 | * Undefined if no zero exists, so code should check against ~0UL first... |
| 289 | */ | 289 | */ |
| 290 | static inline unsigned long | 290 | static inline unsigned long |
| 291 | ffz (unsigned long x) | 291 | ffz (unsigned long x) |
| @@ -345,13 +345,14 @@ fls (int t) | |||
| 345 | x |= x >> 16; | 345 | x |= x >> 16; |
| 346 | return ia64_popcnt(x); | 346 | return ia64_popcnt(x); |
| 347 | } | 347 | } |
| 348 | #define fls64(x) generic_fls64(x) | 348 | |
| 349 | #include <asm-generic/bitops/fls64.h> | ||
| 349 | 350 | ||
| 350 | /* | 351 | /* |
| 351 | * ffs: find first bit set. This is defined the same way as the libc and compiler builtin | 352 | * ffs: find first bit set. This is defined the same way as the libc and |
| 352 | * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on | 353 | * compiler builtin ffs routines, therefore differs in spirit from the above |
| 353 | * "int" values only and the result value is the bit number + 1. ffs(0) is defined to | 354 | * ffz (man ffs): it operates on "int" values only and the result value is the |
| 354 | * return zero. | 355 | * bit number + 1. ffs(0) is defined to return zero. |
| 355 | */ | 356 | */ |
| 356 | #define ffs(x) __builtin_ffs(x) | 357 | #define ffs(x) __builtin_ffs(x) |
| 357 | 358 | ||
| @@ -373,51 +374,17 @@ hweight64 (unsigned long x) | |||
| 373 | 374 | ||
| 374 | #endif /* __KERNEL__ */ | 375 | #endif /* __KERNEL__ */ |
| 375 | 376 | ||
| 376 | extern int __find_next_zero_bit (const void *addr, unsigned long size, | 377 | #include <asm-generic/bitops/find.h> |
| 377 | unsigned long offset); | ||
| 378 | extern int __find_next_bit(const void *addr, unsigned long size, | ||
| 379 | unsigned long offset); | ||
| 380 | |||
| 381 | #define find_next_zero_bit(addr, size, offset) \ | ||
| 382 | __find_next_zero_bit((addr), (size), (offset)) | ||
| 383 | #define find_next_bit(addr, size, offset) \ | ||
| 384 | __find_next_bit((addr), (size), (offset)) | ||
| 385 | |||
| 386 | /* | ||
| 387 | * The optimizer actually does good code for this case.. | ||
| 388 | */ | ||
| 389 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
| 390 | |||
| 391 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
| 392 | 378 | ||
| 393 | #ifdef __KERNEL__ | 379 | #ifdef __KERNEL__ |
| 394 | 380 | ||
| 395 | #define __clear_bit(nr, addr) clear_bit(nr, addr) | 381 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 396 | 382 | ||
| 397 | #define ext2_set_bit test_and_set_bit | ||
| 398 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 383 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 399 | #define ext2_clear_bit test_and_clear_bit | ||
| 400 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 384 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 401 | #define ext2_test_bit test_bit | ||
| 402 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
| 403 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
| 404 | |||
| 405 | /* Bitmap functions for the minix filesystem. */ | ||
| 406 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 407 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 408 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 409 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 410 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 411 | 385 | ||
| 412 | static inline int | 386 | #include <asm-generic/bitops/minix.h> |
| 413 | sched_find_first_bit (unsigned long *b) | 387 | #include <asm-generic/bitops/sched.h> |
| 414 | { | ||
| 415 | if (unlikely(b[0])) | ||
| 416 | return __ffs(b[0]); | ||
| 417 | if (unlikely(b[1])) | ||
| 418 | return 64 + __ffs(b[1]); | ||
| 419 | return __ffs(b[2]) + 128; | ||
| 420 | } | ||
| 421 | 388 | ||
| 422 | #endif /* __KERNEL__ */ | 389 | #endif /* __KERNEL__ */ |
| 423 | 390 | ||
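The ia64 fls() kept above uses a population-count trick instead of a search loop: smearing the highest set bit into every lower position turns "index of the top bit" into "number of set bits". The same idea in portable C, with GCC's __builtin_popcount standing in for ia64_popcnt (an assumption of this sketch, not part of the header):

#include <stdio.h>

/* fls(x): position of the most significant set bit, 1-based; fls(0) == 0. */
static int fls_popcount(unsigned int x)
{
        /* Propagate the top set bit into every lower bit position... */
        x |= x >> 1;
        x |= x >> 2;
        x |= x >> 4;
        x |= x >> 8;
        x |= x >> 16;
        /* ...so the popcount equals the 1-based index of that top bit. */
        return __builtin_popcount(x);
}

int main(void)
{
        printf("%d %d %d %d\n",
               fls_popcount(0),             /* 0  */
               fls_popcount(1),             /* 1  */
               fls_popcount(0x80),          /* 8  */
               fls_popcount(0x80000000u));  /* 32 */
        return 0;
}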
diff --git a/include/asm-ia64/compat.h b/include/asm-ia64/compat.h index c0b19106665c..40d01d80610d 100644 --- a/include/asm-ia64/compat.h +++ b/include/asm-ia64/compat.h | |||
| @@ -189,6 +189,12 @@ compat_ptr (compat_uptr_t uptr) | |||
| 189 | return (void __user *) (unsigned long) uptr; | 189 | return (void __user *) (unsigned long) uptr; |
| 190 | } | 190 | } |
| 191 | 191 | ||
| 192 | static inline compat_uptr_t | ||
| 193 | ptr_to_compat(void __user *uptr) | ||
| 194 | { | ||
| 195 | return (u32)(unsigned long)uptr; | ||
| 196 | } | ||
| 197 | |||
| 192 | static __inline__ void __user * | 198 | static __inline__ void __user * |
| 193 | compat_alloc_user_space (long len) | 199 | compat_alloc_user_space (long len) |
| 194 | { | 200 | { |
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h new file mode 100644 index 000000000000..f3efaa229525 --- /dev/null +++ b/include/asm-ia64/dmi.h | |||
| @@ -0,0 +1,6 @@ | |||
| 1 | #ifndef _ASM_DMI_H | ||
| 2 | #define _ASM_DMI_H 1 | ||
| 3 | |||
| 4 | #include <asm/io.h> | ||
| 5 | |||
| 6 | #endif | ||
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index b64fdb985494..c2e3742108bb 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h | |||
| @@ -88,8 +88,8 @@ phys_to_virt (unsigned long address) | |||
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | 90 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
| 91 | extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ | 91 | extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */ |
| 92 | extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); | 92 | extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count); |
| 93 | 93 | ||
| 94 | /* | 94 | /* |
| 95 | * The following two macros are deprecated and scheduled for removal. | 95 | * The following two macros are deprecated and scheduled for removal. |
| @@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr) | |||
| 416 | # define outl_p outl | 416 | # define outl_p outl |
| 417 | #endif | 417 | #endif |
| 418 | 418 | ||
| 419 | /* | 419 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); |
| 420 | * An "address" in IO memory space is not clearly either an integer or a pointer. We will | 420 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); |
| 421 | * accept both, thus the casts. | ||
| 422 | * | ||
| 423 | * On ia-64, we access the physical I/O memory space through the uncached kernel region. | ||
| 424 | */ | ||
| 425 | static inline void __iomem * | ||
| 426 | ioremap (unsigned long offset, unsigned long size) | ||
| 427 | { | ||
| 428 | return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset)); | ||
| 429 | } | ||
| 430 | 421 | ||
| 431 | static inline void | 422 | static inline void |
| 432 | iounmap (volatile void __iomem *addr) | 423 | iounmap (volatile void __iomem *addr) |
| 433 | { | 424 | { |
| 434 | } | 425 | } |
| 435 | 426 | ||
| 436 | #define ioremap_nocache(o,s) ioremap(o,s) | 427 | /* Use normal IO mappings for DMI */ |
| 428 | #define dmi_ioremap ioremap | ||
| 429 | #define dmi_iounmap(x,l) iounmap(x) | ||
| 430 | #define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) | ||
| 437 | 431 | ||
| 438 | # ifdef __KERNEL__ | 432 | # ifdef __KERNEL__ |
| 439 | 433 | ||
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h index 8b01a083dde6..218c458ab60c 100644 --- a/include/asm-ia64/kdebug.h +++ b/include/asm-ia64/kdebug.h | |||
| @@ -40,7 +40,7 @@ struct die_args { | |||
| 40 | 40 | ||
| 41 | extern int register_die_notifier(struct notifier_block *); | 41 | extern int register_die_notifier(struct notifier_block *); |
| 42 | extern int unregister_die_notifier(struct notifier_block *); | 42 | extern int unregister_die_notifier(struct notifier_block *); |
| 43 | extern struct notifier_block *ia64die_chain; | 43 | extern struct atomic_notifier_head ia64die_chain; |
| 44 | 44 | ||
| 45 | enum die_val { | 45 | enum die_val { |
| 46 | DIE_BREAK = 1, | 46 | DIE_BREAK = 1, |
| @@ -81,7 +81,7 @@ static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs, | |||
| 81 | .signr = sig | 81 | .signr = sig |
| 82 | }; | 82 | }; |
| 83 | 83 | ||
| 84 | return notifier_call_chain(&ia64die_chain, val, &args); | 84 | return atomic_notifier_call_chain(&ia64die_chain, val, &args); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | #endif | 87 | #endif |
diff --git a/include/asm-ia64/page.h b/include/asm-ia64/page.h index 6e9aa23250c4..2087825eefa4 100644 --- a/include/asm-ia64/page.h +++ b/include/asm-ia64/page.h | |||
| @@ -106,17 +106,25 @@ extern int ia64_pfn_valid (unsigned long pfn); | |||
| 106 | # define ia64_pfn_valid(pfn) 1 | 106 | # define ia64_pfn_valid(pfn) 1 |
| 107 | #endif | 107 | #endif |
| 108 | 108 | ||
| 109 | #ifdef CONFIG_VIRTUAL_MEM_MAP | ||
| 110 | extern struct page *vmem_map; | ||
| 111 | #ifdef CONFIG_DISCONTIGMEM | ||
| 112 | # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) | ||
| 113 | # define pfn_to_page(pfn) (vmem_map + (pfn)) | ||
| 114 | #endif | ||
| 115 | #endif | ||
| 116 | |||
| 117 | #if defined(CONFIG_FLATMEM) || defined(CONFIG_SPARSEMEM) | ||
| 118 | /* FLATMEM always configures mem_map (mem_map = vmem_map if necessary) */ | ||
| 119 | #include <asm-generic/memory_model.h> | ||
| 120 | #endif | ||
| 121 | |||
| 109 | #ifdef CONFIG_FLATMEM | 122 | #ifdef CONFIG_FLATMEM |
| 110 | # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) | 123 | # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) |
| 111 | # define page_to_pfn(page) ((unsigned long) (page - mem_map)) | ||
| 112 | # define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 113 | #elif defined(CONFIG_DISCONTIGMEM) | 124 | #elif defined(CONFIG_DISCONTIGMEM) |
| 114 | extern struct page *vmem_map; | ||
| 115 | extern unsigned long min_low_pfn; | 125 | extern unsigned long min_low_pfn; |
| 116 | extern unsigned long max_low_pfn; | 126 | extern unsigned long max_low_pfn; |
| 117 | # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) | 127 | # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) |
| 118 | # define page_to_pfn(page) ((unsigned long) (page - vmem_map)) | ||
| 119 | # define pfn_to_page(pfn) (vmem_map + (pfn)) | ||
| 120 | #endif | 128 | #endif |
| 121 | 129 | ||
| 122 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) | 130 | #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) |
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 244449df7411..bf4cc867a698 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h | |||
| @@ -159,7 +159,7 @@ | |||
| 159 | static inline u32 | 159 | static inline u32 |
| 160 | sn_sal_rev(void) | 160 | sn_sal_rev(void) |
| 161 | { | 161 | { |
| 162 | struct ia64_sal_systab *systab = efi.sal_systab; | 162 | struct ia64_sal_systab *systab = __va(efi.sal_systab); |
| 163 | 163 | ||
| 164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); | 164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); |
| 165 | } | 165 | } |
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h index abea2fdd8689..902a366101a5 100644 --- a/include/asm-m32r/bitops.h +++ b/include/asm-m32r/bitops.h | |||
| @@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | /** | 65 | /** |
| 66 | * __set_bit - Set a bit in memory | ||
| 67 | * @nr: the bit to set | ||
| 68 | * @addr: the address to start counting from | ||
| 69 | * | ||
| 70 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
| 71 | * If it's called on the same region of memory simultaneously, the effect | ||
| 72 | * may be that only one operation succeeds. | ||
| 73 | */ | ||
| 74 | static __inline__ void __set_bit(int nr, volatile void * addr) | ||
| 75 | { | ||
| 76 | __u32 mask; | ||
| 77 | volatile __u32 *a = addr; | ||
| 78 | |||
| 79 | a += (nr >> 5); | ||
| 80 | mask = (1 << (nr & 0x1F)); | ||
| 81 | *a |= mask; | ||
| 82 | } | ||
| 83 | |||
| 84 | /** | ||
| 85 | * clear_bit - Clears a bit in memory | 66 | * clear_bit - Clears a bit in memory |
| 86 | * @nr: Bit to clear | 67 | * @nr: Bit to clear |
| 87 | * @addr: Address to start counting from | 68 | * @addr: Address to start counting from |
| @@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr) | |||
| 118 | local_irq_restore(flags); | 99 | local_irq_restore(flags); |
| 119 | } | 100 | } |
| 120 | 101 | ||
| 121 | static __inline__ void __clear_bit(int nr, volatile unsigned long * addr) | ||
| 122 | { | ||
| 123 | unsigned long mask; | ||
| 124 | volatile unsigned long *a = addr; | ||
| 125 | |||
| 126 | a += (nr >> 5); | ||
| 127 | mask = (1 << (nr & 0x1F)); | ||
| 128 | *a &= ~mask; | ||
| 129 | } | ||
| 130 | |||
| 131 | #define smp_mb__before_clear_bit() barrier() | 102 | #define smp_mb__before_clear_bit() barrier() |
| 132 | #define smp_mb__after_clear_bit() barrier() | 103 | #define smp_mb__after_clear_bit() barrier() |
| 133 | 104 | ||
| 134 | /** | 105 | /** |
| 135 | * __change_bit - Toggle a bit in memory | ||
| 136 | * @nr: the bit to set | ||
| 137 | * @addr: the address to start counting from | ||
| 138 | * | ||
| 139 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
| 140 | * If it's called on the same region of memory simultaneously, the effect | ||
| 141 | * may be that only one operation succeeds. | ||
| 142 | */ | ||
| 143 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
| 144 | { | ||
| 145 | __u32 mask; | ||
| 146 | volatile __u32 *a = addr; | ||
| 147 | |||
| 148 | a += (nr >> 5); | ||
| 149 | mask = (1 << (nr & 0x1F)); | ||
| 150 | *a ^= mask; | ||
| 151 | } | ||
| 152 | |||
| 153 | /** | ||
| 154 | * change_bit - Toggle a bit in memory | 106 | * change_bit - Toggle a bit in memory |
| 155 | * @nr: Bit to clear | 107 | * @nr: Bit to clear |
| 156 | * @addr: Address to start counting from | 108 | * @addr: Address to start counting from |
| @@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
| 221 | } | 173 | } |
| 222 | 174 | ||
| 223 | /** | 175 | /** |
| 224 | * __test_and_set_bit - Set a bit and return its old value | ||
| 225 | * @nr: Bit to set | ||
| 226 | * @addr: Address to count from | ||
| 227 | * | ||
| 228 | * This operation is non-atomic and can be reordered. | ||
| 229 | * If two examples of this operation race, one can appear to succeed | ||
| 230 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 231 | */ | ||
| 232 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
| 233 | { | ||
| 234 | __u32 mask, oldbit; | ||
| 235 | volatile __u32 *a = addr; | ||
| 236 | |||
| 237 | a += (nr >> 5); | ||
| 238 | mask = (1 << (nr & 0x1F)); | ||
| 239 | oldbit = (*a & mask); | ||
| 240 | *a |= mask; | ||
| 241 | |||
| 242 | return (oldbit != 0); | ||
| 243 | } | ||
| 244 | |||
| 245 | /** | ||
| 246 | * test_and_clear_bit - Clear a bit and return its old value | 176 | * test_and_clear_bit - Clear a bit and return its old value |
| 247 | * @nr: Bit to set | 177 | * @nr: Bit to set |
| 248 | * @addr: Address to count from | 178 | * @addr: Address to count from |
| @@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
| 280 | } | 210 | } |
| 281 | 211 | ||
| 282 | /** | 212 | /** |
| 283 | * __test_and_clear_bit - Clear a bit and return its old value | ||
| 284 | * @nr: Bit to set | ||
| 285 | * @addr: Address to count from | ||
| 286 | * | ||
| 287 | * This operation is non-atomic and can be reordered. | ||
| 288 | * If two examples of this operation race, one can appear to succeed | ||
| 289 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 290 | */ | ||
| 291 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
| 292 | { | ||
| 293 | __u32 mask, oldbit; | ||
| 294 | volatile __u32 *a = addr; | ||
| 295 | |||
| 296 | a += (nr >> 5); | ||
| 297 | mask = (1 << (nr & 0x1F)); | ||
| 298 | oldbit = (*a & mask); | ||
| 299 | *a &= ~mask; | ||
| 300 | |||
| 301 | return (oldbit != 0); | ||
| 302 | } | ||
| 303 | |||
| 304 | /* WARNING: non atomic and it can be reordered! */ | ||
| 305 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | ||
| 306 | { | ||
| 307 | __u32 mask, oldbit; | ||
| 308 | volatile __u32 *a = addr; | ||
| 309 | |||
| 310 | a += (nr >> 5); | ||
| 311 | mask = (1 << (nr & 0x1F)); | ||
| 312 | oldbit = (*a & mask); | ||
| 313 | *a ^= mask; | ||
| 314 | |||
| 315 | return (oldbit != 0); | ||
| 316 | } | ||
| 317 | |||
| 318 | /** | ||
| 319 | * test_and_change_bit - Change a bit and return its old value | 213 | * test_and_change_bit - Change a bit and return its old value |
| 320 | * @nr: Bit to set | 214 | * @nr: Bit to set |
| 321 | * @addr: Address to count from | 215 | * @addr: Address to count from |
| @@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
| 350 | return (oldbit != 0); | 244 | return (oldbit != 0); |
| 351 | } | 245 | } |
| 352 | 246 | ||
| 353 | /** | 247 | #include <asm-generic/bitops/non-atomic.h> |
| 354 | * test_bit - Determine whether a bit is set | 248 | #include <asm-generic/bitops/ffz.h> |
| 355 | * @nr: bit number to test | 249 | #include <asm-generic/bitops/__ffs.h> |
| 356 | * @addr: Address to start counting from | 250 | #include <asm-generic/bitops/fls.h> |
| 357 | */ | 251 | #include <asm-generic/bitops/fls64.h> |
| 358 | static __inline__ int test_bit(int nr, const volatile void * addr) | ||
| 359 | { | ||
| 360 | __u32 mask; | ||
| 361 | const volatile __u32 *a = addr; | ||
| 362 | |||
| 363 | a += (nr >> 5); | ||
| 364 | mask = (1 << (nr & 0x1F)); | ||
| 365 | |||
| 366 | return ((*a & mask) != 0); | ||
| 367 | } | ||
| 368 | |||
| 369 | /** | ||
| 370 | * ffz - find first zero in word. | ||
| 371 | * @word: The word to search | ||
| 372 | * | ||
| 373 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
| 374 | */ | ||
| 375 | static __inline__ unsigned long ffz(unsigned long word) | ||
| 376 | { | ||
| 377 | int k; | ||
| 378 | |||
| 379 | word = ~word; | ||
| 380 | k = 0; | ||
| 381 | if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } | ||
| 382 | if (!(word & 0x000000ff)) { k += 8; word >>= 8; } | ||
| 383 | if (!(word & 0x0000000f)) { k += 4; word >>= 4; } | ||
| 384 | if (!(word & 0x00000003)) { k += 2; word >>= 2; } | ||
| 385 | if (!(word & 0x00000001)) { k += 1; } | ||
| 386 | |||
| 387 | return k; | ||
| 388 | } | ||
| 389 | |||
| 390 | /** | ||
| 391 | * find_first_zero_bit - find the first zero bit in a memory region | ||
| 392 | * @addr: The address to start the search at | ||
| 393 | * @size: The maximum size to search | ||
| 394 | * | ||
| 395 | * Returns the bit-number of the first zero bit, not the number of the byte | ||
| 396 | * containing a bit. | ||
| 397 | */ | ||
| 398 | |||
| 399 | #define find_first_zero_bit(addr, size) \ | ||
| 400 | find_next_zero_bit((addr), (size), 0) | ||
| 401 | |||
| 402 | /** | ||
| 403 | * find_next_zero_bit - find the first zero bit in a memory region | ||
| 404 | * @addr: The address to base the search on | ||
| 405 | * @offset: The bitnumber to start searching at | ||
| 406 | * @size: The maximum size to search | ||
| 407 | */ | ||
| 408 | static __inline__ int find_next_zero_bit(const unsigned long *addr, | ||
| 409 | int size, int offset) | ||
| 410 | { | ||
| 411 | const unsigned long *p = addr + (offset >> 5); | ||
| 412 | unsigned long result = offset & ~31UL; | ||
| 413 | unsigned long tmp; | ||
| 414 | |||
| 415 | if (offset >= size) | ||
| 416 | return size; | ||
| 417 | size -= result; | ||
| 418 | offset &= 31UL; | ||
| 419 | if (offset) { | ||
| 420 | tmp = *(p++); | ||
| 421 | tmp |= ~0UL >> (32-offset); | ||
| 422 | if (size < 32) | ||
| 423 | goto found_first; | ||
| 424 | if (~tmp) | ||
| 425 | goto found_middle; | ||
| 426 | size -= 32; | ||
| 427 | result += 32; | ||
| 428 | } | ||
| 429 | while (size & ~31UL) { | ||
| 430 | if (~(tmp = *(p++))) | ||
| 431 | goto found_middle; | ||
| 432 | result += 32; | ||
| 433 | size -= 32; | ||
| 434 | } | ||
| 435 | if (!size) | ||
| 436 | return result; | ||
| 437 | tmp = *p; | ||
| 438 | |||
| 439 | found_first: | ||
| 440 | tmp |= ~0UL << size; | ||
| 441 | found_middle: | ||
| 442 | return result + ffz(tmp); | ||
| 443 | } | ||
| 444 | |||
| 445 | /** | ||
| 446 | * __ffs - find first bit in word. | ||
| 447 | * @word: The word to search | ||
| 448 | * | ||
| 449 | * Undefined if no bit exists, so code should check against 0 first. | ||
| 450 | */ | ||
| 451 | static __inline__ unsigned long __ffs(unsigned long word) | ||
| 452 | { | ||
| 453 | int k = 0; | ||
| 454 | |||
| 455 | if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } | ||
| 456 | if (!(word & 0x000000ff)) { k += 8; word >>= 8; } | ||
| 457 | if (!(word & 0x0000000f)) { k += 4; word >>= 4; } | ||
| 458 | if (!(word & 0x00000003)) { k += 2; word >>= 2; } | ||
| 459 | if (!(word & 0x00000001)) { k += 1;} | ||
| 460 | |||
| 461 | return k; | ||
| 462 | } | ||
| 463 | |||
| 464 | /* | ||
| 465 | * fls: find last bit set. | ||
| 466 | */ | ||
| 467 | #define fls(x) generic_fls(x) | ||
| 468 | #define fls64(x) generic_fls64(x) | ||
| 469 | 252 | ||
| 470 | #ifdef __KERNEL__ | 253 | #ifdef __KERNEL__ |
| 471 | 254 | ||
| 472 | /* | 255 | #include <asm-generic/bitops/sched.h> |
| 473 | * Every architecture must define this function. It's the fastest | 256 | #include <asm-generic/bitops/find.h> |
| 474 | * way of searching a 140-bit bitmap where the first 100 bits are | 257 | #include <asm-generic/bitops/ffs.h> |
| 475 | * unlikely to be set. It's guaranteed that at least one of the 140 | 258 | #include <asm-generic/bitops/hweight.h> |
| 476 | * bits is cleared. | ||
| 477 | */ | ||
| 478 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 479 | { | ||
| 480 | if (unlikely(b[0])) | ||
| 481 | return __ffs(b[0]); | ||
| 482 | if (unlikely(b[1])) | ||
| 483 | return __ffs(b[1]) + 32; | ||
| 484 | if (unlikely(b[2])) | ||
| 485 | return __ffs(b[2]) + 64; | ||
| 486 | if (b[3]) | ||
| 487 | return __ffs(b[3]) + 96; | ||
| 488 | return __ffs(b[4]) + 128; | ||
| 489 | } | ||
| 490 | |||
| 491 | /** | ||
| 492 | * find_next_bit - find the first set bit in a memory region | ||
| 493 | * @addr: The address to base the search on | ||
| 494 | * @offset: The bitnumber to start searching at | ||
| 495 | * @size: The maximum size to search | ||
| 496 | */ | ||
| 497 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
| 498 | unsigned long size, unsigned long offset) | ||
| 499 | { | ||
| 500 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
| 501 | unsigned int result = offset & ~31UL; | ||
| 502 | unsigned int tmp; | ||
| 503 | |||
| 504 | if (offset >= size) | ||
| 505 | return size; | ||
| 506 | size -= result; | ||
| 507 | offset &= 31UL; | ||
| 508 | if (offset) { | ||
| 509 | tmp = *p++; | ||
| 510 | tmp &= ~0UL << offset; | ||
| 511 | if (size < 32) | ||
| 512 | goto found_first; | ||
| 513 | if (tmp) | ||
| 514 | goto found_middle; | ||
| 515 | size -= 32; | ||
| 516 | result += 32; | ||
| 517 | } | ||
| 518 | while (size >= 32) { | ||
| 519 | if ((tmp = *p++) != 0) | ||
| 520 | goto found_middle; | ||
| 521 | result += 32; | ||
| 522 | size -= 32; | ||
| 523 | } | ||
| 524 | if (!size) | ||
| 525 | return result; | ||
| 526 | tmp = *p; | ||
| 527 | |||
| 528 | found_first: | ||
| 529 | tmp &= ~0UL >> (32 - size); | ||
| 530 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 531 | return result + size; /* Nope. */ | ||
| 532 | found_middle: | ||
| 533 | return result + __ffs(tmp); | ||
| 534 | } | ||
| 535 | |||
| 536 | /** | ||
| 537 | * find_first_bit - find the first set bit in a memory region | ||
| 538 | * @addr: The address to start the search at | ||
| 539 | * @size: The maximum size to search | ||
| 540 | * | ||
| 541 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 542 | * containing a bit. | ||
| 543 | */ | ||
| 544 | #define find_first_bit(addr, size) \ | ||
| 545 | find_next_bit((addr), (size), 0) | ||
| 546 | |||
| 547 | /** | ||
| 548 | * ffs - find first bit set | ||
| 549 | * @x: the word to search | ||
| 550 | * | ||
| 551 | * This is defined the same way as | ||
| 552 | * the libc and compiler builtin ffs routines, therefore | ||
| 553 | * differs in spirit from the above ffz (man ffs). | ||
| 554 | */ | ||
| 555 | #define ffs(x) generic_ffs(x) | ||
| 556 | |||
| 557 | /** | ||
| 558 | * hweightN - returns the hamming weight of a N-bit word | ||
| 559 | * @x: the word to weigh | ||
| 560 | * | ||
| 561 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 562 | */ | ||
| 563 | |||
| 564 | #define hweight32(x) generic_hweight32(x) | ||
| 565 | #define hweight16(x) generic_hweight16(x) | ||
| 566 | #define hweight8(x) generic_hweight8(x) | ||
| 567 | 259 | ||
| 568 | #endif /* __KERNEL__ */ | 260 | #endif /* __KERNEL__ */ |
| 569 | 261 | ||
| 570 | #ifdef __KERNEL__ | 262 | #ifdef __KERNEL__ |
| 571 | 263 | ||
| 572 | /* | 264 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 573 | * ext2_XXXX function | 265 | #include <asm-generic/bitops/ext2-atomic.h> |
| 574 | * orig: include/asm-sh/bitops.h | 266 | #include <asm-generic/bitops/minix.h> |
| 575 | */ | ||
| 576 | |||
| 577 | #ifdef __LITTLE_ENDIAN__ | ||
| 578 | #define ext2_set_bit test_and_set_bit | ||
| 579 | #define ext2_clear_bit __test_and_clear_bit | ||
| 580 | #define ext2_test_bit test_bit | ||
| 581 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
| 582 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
| 583 | #else | ||
| 584 | static inline int ext2_set_bit(int nr, volatile void * addr) | ||
| 585 | { | ||
| 586 | __u8 mask, oldbit; | ||
| 587 | volatile __u8 *a = addr; | ||
| 588 | |||
| 589 | a += (nr >> 3); | ||
| 590 | mask = (1 << (nr & 0x07)); | ||
| 591 | oldbit = (*a & mask); | ||
| 592 | *a |= mask; | ||
| 593 | |||
| 594 | return (oldbit != 0); | ||
| 595 | } | ||
| 596 | |||
| 597 | static inline int ext2_clear_bit(int nr, volatile void * addr) | ||
| 598 | { | ||
| 599 | __u8 mask, oldbit; | ||
| 600 | volatile __u8 *a = addr; | ||
| 601 | |||
| 602 | a += (nr >> 3); | ||
| 603 | mask = (1 << (nr & 0x07)); | ||
| 604 | oldbit = (*a & mask); | ||
| 605 | *a &= ~mask; | ||
| 606 | |||
| 607 | return (oldbit != 0); | ||
| 608 | } | ||
| 609 | |||
| 610 | static inline int ext2_test_bit(int nr, const volatile void * addr) | ||
| 611 | { | ||
| 612 | __u32 mask; | ||
| 613 | const volatile __u8 *a = addr; | ||
| 614 | |||
| 615 | a += (nr >> 3); | ||
| 616 | mask = (1 << (nr & 0x07)); | ||
| 617 | |||
| 618 | return ((mask & *a) != 0); | ||
| 619 | } | ||
| 620 | |||
| 621 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 622 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 623 | |||
| 624 | static inline unsigned long ext2_find_next_zero_bit(void *addr, | ||
| 625 | unsigned long size, unsigned long offset) | ||
| 626 | { | ||
| 627 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 628 | unsigned long result = offset & ~31UL; | ||
| 629 | unsigned long tmp; | ||
| 630 | |||
| 631 | if (offset >= size) | ||
| 632 | return size; | ||
| 633 | size -= result; | ||
| 634 | offset &= 31UL; | ||
| 635 | if(offset) { | ||
| 636 | /* We hold the little endian value in tmp, but then the | ||
| 637 | * shift is illegal. So we could keep a big endian value | ||
| 638 | * in tmp, like this: | ||
| 639 | * | ||
| 640 | * tmp = __swab32(*(p++)); | ||
| 641 | * tmp |= ~0UL >> (32-offset); | ||
| 642 | * | ||
| 643 | * but this would decrease preformance, so we change the | ||
| 644 | * shift: | ||
| 645 | */ | ||
| 646 | tmp = *(p++); | ||
| 647 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 648 | if(size < 32) | ||
| 649 | goto found_first; | ||
| 650 | if(~tmp) | ||
| 651 | goto found_middle; | ||
| 652 | size -= 32; | ||
| 653 | result += 32; | ||
| 654 | } | ||
| 655 | while(size & ~31UL) { | ||
| 656 | if(~(tmp = *(p++))) | ||
| 657 | goto found_middle; | ||
| 658 | result += 32; | ||
| 659 | size -= 32; | ||
| 660 | } | ||
| 661 | if(!size) | ||
| 662 | return result; | ||
| 663 | tmp = *p; | ||
| 664 | |||
| 665 | found_first: | ||
| 666 | /* tmp is little endian, so we would have to swab the shift, | ||
| 667 | * see above. But then we have to swab tmp below for ffz, so | ||
| 668 | * we might as well do this here. | ||
| 669 | */ | ||
| 670 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 671 | found_middle: | ||
| 672 | return result + ffz(__swab32(tmp)); | ||
| 673 | } | ||
| 674 | #endif | ||
| 675 | |||
| 676 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 677 | ({ \ | ||
| 678 | int ret; \ | ||
| 679 | spin_lock(lock); \ | ||
| 680 | ret = ext2_set_bit((nr), (addr)); \ | ||
| 681 | spin_unlock(lock); \ | ||
| 682 | ret; \ | ||
| 683 | }) | ||
| 684 | |||
| 685 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 686 | ({ \ | ||
| 687 | int ret; \ | ||
| 688 | spin_lock(lock); \ | ||
| 689 | ret = ext2_clear_bit((nr), (addr)); \ | ||
| 690 | spin_unlock(lock); \ | ||
| 691 | ret; \ | ||
| 692 | }) | ||
| 693 | |||
| 694 | /* Bitmap functions for the minix filesystem. */ | ||
| 695 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) | ||
| 696 | #define minix_set_bit(nr,addr) __set_bit(nr,addr) | ||
| 697 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) | ||
| 698 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 699 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 700 | 267 | ||
| 701 | #endif /* __KERNEL__ */ | 268 | #endif /* __KERNEL__ */ |
| 702 | 269 | ||
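The open-coded m32r ffz() and __ffs() removed above differ only in whether the word is complemented first, which is why both can come from the same generic headers: ffz(word) is simply __ffs(~word). A small check of that identity, using GCC's __builtin_ctzl as a stand-in reference __ffs (an assumption of this sketch):

#include <stdio.h>

/* Reference __ffs(): index of the lowest set bit (undefined for 0). */
static unsigned long my_ffs(unsigned long word)
{
        return (unsigned long)__builtin_ctzl(word);
}

/* ffz(): index of the lowest clear bit (undefined for ~0UL). */
static unsigned long my_ffz(unsigned long word)
{
        return my_ffs(~word);
}

int main(void)
{
        unsigned long samples[] = { 0x0, 0x1, 0xff, 0xfffffff7 };
        unsigned int i;

        /* Expected: 0, 1, 8, 3 */
        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("ffz(0x%lx) = %lu\n", samples[i], my_ffz(samples[i]));
        return 0;
}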
diff --git a/include/asm-m32r/mmzone.h b/include/asm-m32r/mmzone.h index adc7970a77ec..9f3b5accda88 100644 --- a/include/asm-m32r/mmzone.h +++ b/include/asm-m32r/mmzone.h | |||
| @@ -21,20 +21,6 @@ extern struct pglist_data *node_data[]; | |||
| 21 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \ | 21 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages - 1; \ |
| 22 | }) | 22 | }) |
| 23 | 23 | ||
| 24 | #define pfn_to_page(pfn) \ | ||
| 25 | ({ \ | ||
| 26 | unsigned long __pfn = pfn; \ | ||
| 27 | int __node = pfn_to_nid(__pfn); \ | ||
| 28 | &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ | ||
| 29 | }) | ||
| 30 | |||
| 31 | #define page_to_pfn(pg) \ | ||
| 32 | ({ \ | ||
| 33 | struct page *__page = pg; \ | ||
| 34 | struct zone *__zone = page_zone(__page); \ | ||
| 35 | (unsigned long)(__page - __zone->zone_mem_map) \ | ||
| 36 | + __zone->zone_start_pfn; \ | ||
| 37 | }) | ||
| 38 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) | 24 | #define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)) |
| 39 | /* | 25 | /* |
| 40 | * pfn_valid should be made as fast as possible, and the current definition | 26 | * pfn_valid should be made as fast as possible, and the current definition |
diff --git a/include/asm-m32r/page.h b/include/asm-m32r/page.h index 4ab578876361..9ddbc087dbc5 100644 --- a/include/asm-m32r/page.h +++ b/include/asm-m32r/page.h | |||
| @@ -76,9 +76,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 76 | 76 | ||
| 77 | #ifndef CONFIG_DISCONTIGMEM | 77 | #ifndef CONFIG_DISCONTIGMEM |
| 78 | #define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT) | 78 | #define PFN_BASE (CONFIG_MEMORY_START >> PAGE_SHIFT) |
| 79 | #define pfn_to_page(pfn) (mem_map + ((pfn) - PFN_BASE)) | 79 | #define ARCH_PFN_OFFSET PFN_BASE |
| 80 | #define page_to_pfn(page) \ | ||
| 81 | ((unsigned long)((page) - mem_map) + PFN_BASE) | ||
| 82 | #define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr) | 80 | #define pfn_valid(pfn) (((pfn) - PFN_BASE) < max_mapnr) |
| 83 | #endif /* !CONFIG_DISCONTIGMEM */ | 81 | #endif /* !CONFIG_DISCONTIGMEM */ |
| 84 | 82 | ||
| @@ -92,6 +90,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 92 | 90 | ||
| 93 | #endif /* __KERNEL__ */ | 91 | #endif /* __KERNEL__ */ |
| 94 | 92 | ||
| 93 | #include <asm-generic/memory_model.h> | ||
| 95 | #include <asm-generic/page.h> | 94 | #include <asm-generic/page.h> |
| 96 | 95 | ||
| 97 | #endif /* _ASM_M32R_PAGE_H */ | 96 | #endif /* _ASM_M32R_PAGE_H */ |
diff --git a/include/asm-m32r/setup.h b/include/asm-m32r/setup.h index 5f028dc26a9b..52f4fa29abfc 100644 --- a/include/asm-m32r/setup.h +++ b/include/asm-m32r/setup.h | |||
| @@ -24,10 +24,6 @@ | |||
| 24 | #define RAMDISK_PROMPT_FLAG (0x8000) | 24 | #define RAMDISK_PROMPT_FLAG (0x8000) |
| 25 | #define RAMDISK_LOAD_FLAG (0x4000) | 25 | #define RAMDISK_LOAD_FLAG (0x4000) |
| 26 | 26 | ||
| 27 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | ||
| 28 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
| 29 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
| 30 | |||
| 31 | extern unsigned long memory_start; | 27 | extern unsigned long memory_start; |
| 32 | extern unsigned long memory_end; | 28 | extern unsigned long memory_end; |
| 33 | 29 | ||
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h index 13f4c0048463..1a61fdb56aaf 100644 --- a/include/asm-m68k/bitops.h +++ b/include/asm-m68k/bitops.h | |||
| @@ -310,36 +310,10 @@ static inline int fls(int x) | |||
| 310 | 310 | ||
| 311 | return 32 - cnt; | 311 | return 32 - cnt; |
| 312 | } | 312 | } |
| 313 | #define fls64(x) generic_fls64(x) | ||
| 314 | 313 | ||
| 315 | /* | 314 | #include <asm-generic/bitops/fls64.h> |
| 316 | * Every architecture must define this function. It's the fastest | 315 | #include <asm-generic/bitops/sched.h> |
| 317 | * way of searching a 140-bit bitmap where the first 100 bits are | 316 | #include <asm-generic/bitops/hweight.h> |
| 318 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 319 | * bits is cleared. | ||
| 320 | */ | ||
| 321 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 322 | { | ||
| 323 | if (unlikely(b[0])) | ||
| 324 | return __ffs(b[0]); | ||
| 325 | if (unlikely(b[1])) | ||
| 326 | return __ffs(b[1]) + 32; | ||
| 327 | if (unlikely(b[2])) | ||
| 328 | return __ffs(b[2]) + 64; | ||
| 329 | if (b[3]) | ||
| 330 | return __ffs(b[3]) + 96; | ||
| 331 | return __ffs(b[4]) + 128; | ||
| 332 | } | ||
| 333 | |||
| 334 | |||
| 335 | /* | ||
| 336 | * hweightN: returns the hamming weight (i.e. the number | ||
| 337 | * of bits set) of a N-bit word | ||
| 338 | */ | ||
| 339 | |||
| 340 | #define hweight32(x) generic_hweight32(x) | ||
| 341 | #define hweight16(x) generic_hweight16(x) | ||
| 342 | #define hweight8(x) generic_hweight8(x) | ||
| 343 | 317 | ||
| 344 | /* Bitmap functions for the minix filesystem */ | 318 | /* Bitmap functions for the minix filesystem */ |
| 345 | 319 | ||
| @@ -365,9 +339,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | |||
| 365 | return ((p - addr) << 4) + (res ^ 31); | 339 | return ((p - addr) << 4) + (res ^ 31); |
| 366 | } | 340 | } |
| 367 | 341 | ||
| 368 | #define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) | 342 | #define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) |
| 369 | #define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr)) | 343 | #define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) |
| 370 | #define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) | 344 | #define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) |
| 371 | 345 | ||
| 372 | static inline int minix_test_bit(int nr, const void *vaddr) | 346 | static inline int minix_test_bit(int nr, const void *vaddr) |
| 373 | { | 347 | { |
| @@ -377,9 +351,9 @@ static inline int minix_test_bit(int nr, const void *vaddr) | |||
| 377 | 351 | ||
| 378 | /* Bitmap functions for the ext2 filesystem. */ | 352 | /* Bitmap functions for the ext2 filesystem. */ |
| 379 | 353 | ||
| 380 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 354 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
| 381 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 355 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
| 382 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 356 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
| 383 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 357 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
| 384 | 358 | ||
| 385 | static inline int ext2_test_bit(int nr, const void *vaddr) | 359 | static inline int ext2_test_bit(int nr, const void *vaddr) |
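The hweight helpers now pulled in from <asm-generic/bitops/hweight.h> for m68k as well count bits in parallel rather than looping; the 32-bit variant is essentially the classic SWAR reduction shown here as a stand-alone sketch (equivalent in result to the generic kernel routine):

#include <stdio.h>

/* Parallel popcount: sum bits in pairs, then nibbles, bytes, halfwords. */
static unsigned int hweight32_model(unsigned int w)
{
        unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);
        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
        res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
        return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
}

int main(void)
{
        printf("%u %u %u\n",
               hweight32_model(0),            /* 0  */
               hweight32_model(0xf0f0f0f0),   /* 16 */
               hweight32_model(0xffffffff));  /* 32 */
        return 0;
}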
diff --git a/include/asm-m68k/stat.h b/include/asm-m68k/stat.h index c4c402a45e21..dd38bc2e9f98 100644 --- a/include/asm-m68k/stat.h +++ b/include/asm-m68k/stat.h | |||
| @@ -60,8 +60,7 @@ struct stat64 { | |||
| 60 | long long st_size; | 60 | long long st_size; |
| 61 | unsigned long st_blksize; | 61 | unsigned long st_blksize; |
| 62 | 62 | ||
| 63 | unsigned long __pad4; /* future possible st_blocks high bits */ | 63 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
| 64 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
| 65 | 64 | ||
| 66 | unsigned long st_atime; | 65 | unsigned long st_atime; |
| 67 | unsigned long st_atime_nsec; | 66 | unsigned long st_atime_nsec; |
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index 25d8a3cfef90..0b68ccd327f7 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h | |||
| @@ -12,104 +12,10 @@ | |||
| 12 | 12 | ||
| 13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
| 14 | 14 | ||
| 15 | /* | 15 | #include <asm-generic/bitops/ffs.h> |
| 16 | * Generic ffs(). | 16 | #include <asm-generic/bitops/__ffs.h> |
| 17 | */ | 17 | #include <asm-generic/bitops/sched.h> |
| 18 | static inline int ffs(int x) | 18 | #include <asm-generic/bitops/ffz.h> |
| 19 | { | ||
| 20 | int r = 1; | ||
| 21 | |||
| 22 | if (!x) | ||
| 23 | return 0; | ||
| 24 | if (!(x & 0xffff)) { | ||
| 25 | x >>= 16; | ||
| 26 | r += 16; | ||
| 27 | } | ||
| 28 | if (!(x & 0xff)) { | ||
| 29 | x >>= 8; | ||
| 30 | r += 8; | ||
| 31 | } | ||
| 32 | if (!(x & 0xf)) { | ||
| 33 | x >>= 4; | ||
| 34 | r += 4; | ||
| 35 | } | ||
| 36 | if (!(x & 3)) { | ||
| 37 | x >>= 2; | ||
| 38 | r += 2; | ||
| 39 | } | ||
| 40 | if (!(x & 1)) { | ||
| 41 | x >>= 1; | ||
| 42 | r += 1; | ||
| 43 | } | ||
| 44 | return r; | ||
| 45 | } | ||
| 46 | |||
| 47 | /* | ||
| 48 | * Generic __ffs(). | ||
| 49 | */ | ||
| 50 | static inline int __ffs(int x) | ||
| 51 | { | ||
| 52 | int r = 0; | ||
| 53 | |||
| 54 | if (!x) | ||
| 55 | return 0; | ||
| 56 | if (!(x & 0xffff)) { | ||
| 57 | x >>= 16; | ||
| 58 | r += 16; | ||
| 59 | } | ||
| 60 | if (!(x & 0xff)) { | ||
| 61 | x >>= 8; | ||
| 62 | r += 8; | ||
| 63 | } | ||
| 64 | if (!(x & 0xf)) { | ||
| 65 | x >>= 4; | ||
| 66 | r += 4; | ||
| 67 | } | ||
| 68 | if (!(x & 3)) { | ||
| 69 | x >>= 2; | ||
| 70 | r += 2; | ||
| 71 | } | ||
| 72 | if (!(x & 1)) { | ||
| 73 | x >>= 1; | ||
| 74 | r += 1; | ||
| 75 | } | ||
| 76 | return r; | ||
| 77 | } | ||
| 78 | |||
| 79 | /* | ||
| 80 | * Every architecture must define this function. It's the fastest | ||
| 81 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 82 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 83 | * bits is cleared. | ||
| 84 | */ | ||
| 85 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 86 | { | ||
| 87 | if (unlikely(b[0])) | ||
| 88 | return __ffs(b[0]); | ||
| 89 | if (unlikely(b[1])) | ||
| 90 | return __ffs(b[1]) + 32; | ||
| 91 | if (unlikely(b[2])) | ||
| 92 | return __ffs(b[2]) + 64; | ||
| 93 | if (b[3]) | ||
| 94 | return __ffs(b[3]) + 96; | ||
| 95 | return __ffs(b[4]) + 128; | ||
| 96 | } | ||
| 97 | |||
| 98 | /* | ||
| 99 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
| 100 | * so code should check against ~0UL first.. | ||
| 101 | */ | ||
| 102 | static __inline__ unsigned long ffz(unsigned long word) | ||
| 103 | { | ||
| 104 | unsigned long result = 0; | ||
| 105 | |||
| 106 | while(word & 1) { | ||
| 107 | result++; | ||
| 108 | word >>= 1; | ||
| 109 | } | ||
| 110 | return result; | ||
| 111 | } | ||
| 112 | |||
| 113 | 19 | ||
| 114 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 20 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
| 115 | { | 21 | { |
| @@ -254,98 +160,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | |||
| 254 | __constant_test_bit((nr),(addr)) : \ | 160 | __constant_test_bit((nr),(addr)) : \ |
| 255 | __test_bit((nr),(addr))) | 161 | __test_bit((nr),(addr))) |
| 256 | 162 | ||
| 257 | #define find_first_zero_bit(addr, size) \ | 163 | #include <asm-generic/bitops/find.h> |
| 258 | find_next_zero_bit((addr), (size), 0) | 164 | #include <asm-generic/bitops/hweight.h> |
| 259 | #define find_first_bit(addr, size) \ | ||
| 260 | find_next_bit((addr), (size), 0) | ||
| 261 | |||
| 262 | static __inline__ int find_next_zero_bit (const void * addr, int size, int offset) | ||
| 263 | { | ||
| 264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 265 | unsigned long result = offset & ~31UL; | ||
| 266 | unsigned long tmp; | ||
| 267 | |||
| 268 | if (offset >= size) | ||
| 269 | return size; | ||
| 270 | size -= result; | ||
| 271 | offset &= 31UL; | ||
| 272 | if (offset) { | ||
| 273 | tmp = *(p++); | ||
| 274 | tmp |= ~0UL >> (32-offset); | ||
| 275 | if (size < 32) | ||
| 276 | goto found_first; | ||
| 277 | if (~tmp) | ||
| 278 | goto found_middle; | ||
| 279 | size -= 32; | ||
| 280 | result += 32; | ||
| 281 | } | ||
| 282 | while (size & ~31UL) { | ||
| 283 | if (~(tmp = *(p++))) | ||
| 284 | goto found_middle; | ||
| 285 | result += 32; | ||
| 286 | size -= 32; | ||
| 287 | } | ||
| 288 | if (!size) | ||
| 289 | return result; | ||
| 290 | tmp = *p; | ||
| 291 | |||
| 292 | found_first: | ||
| 293 | tmp |= ~0UL << size; | ||
| 294 | found_middle: | ||
| 295 | return result + ffz(tmp); | ||
| 296 | } | ||
| 297 | |||
| 298 | /* | ||
| 299 | * Find next one bit in a bitmap reasonably efficiently. | ||
| 300 | */ | ||
| 301 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
| 302 | unsigned long size, unsigned long offset) | ||
| 303 | { | ||
| 304 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
| 305 | unsigned int result = offset & ~31UL; | ||
| 306 | unsigned int tmp; | ||
| 307 | |||
| 308 | if (offset >= size) | ||
| 309 | return size; | ||
| 310 | size -= result; | ||
| 311 | offset &= 31UL; | ||
| 312 | if (offset) { | ||
| 313 | tmp = *p++; | ||
| 314 | tmp &= ~0UL << offset; | ||
| 315 | if (size < 32) | ||
| 316 | goto found_first; | ||
| 317 | if (tmp) | ||
| 318 | goto found_middle; | ||
| 319 | size -= 32; | ||
| 320 | result += 32; | ||
| 321 | } | ||
| 322 | while (size >= 32) { | ||
| 323 | if ((tmp = *p++) != 0) | ||
| 324 | goto found_middle; | ||
| 325 | result += 32; | ||
| 326 | size -= 32; | ||
| 327 | } | ||
| 328 | if (!size) | ||
| 329 | return result; | ||
| 330 | tmp = *p; | ||
| 331 | |||
| 332 | found_first: | ||
| 333 | tmp &= ~0UL >> (32 - size); | ||
| 334 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 335 | return result + size; /* Nope. */ | ||
| 336 | found_middle: | ||
| 337 | return result + __ffs(tmp); | ||
| 338 | } | ||
| 339 | |||
| 340 | /* | ||
| 341 | * hweightN: returns the hamming weight (i.e. the number | ||
| 342 | * of bits set) of a N-bit word | ||
| 343 | */ | ||
| 344 | |||
| 345 | #define hweight32(x) generic_hweight32(x) | ||
| 346 | #define hweight16(x) generic_hweight16(x) | ||
| 347 | #define hweight8(x) generic_hweight8(x) | ||
| 348 | |||
| 349 | 165 | ||
| 350 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | 166 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) |
| 351 | { | 167 | { |
| @@ -475,30 +291,11 @@ found_middle: | |||
| 475 | return result + ffz(__swab32(tmp)); | 291 | return result + ffz(__swab32(tmp)); |
| 476 | } | 292 | } |
| 477 | 293 | ||
| 478 | /* Bitmap functions for the minix filesystem. */ | 294 | #include <asm-generic/bitops/minix.h> |
| 479 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 480 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 481 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 482 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 483 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 484 | |||
| 485 | /** | ||
| 486 | * hweightN - returns the hamming weight of a N-bit word | ||
| 487 | * @x: the word to weigh | ||
| 488 | * | ||
| 489 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 490 | */ | ||
| 491 | |||
| 492 | #define hweight32(x) generic_hweight32(x) | ||
| 493 | #define hweight16(x) generic_hweight16(x) | ||
| 494 | #define hweight8(x) generic_hweight8(x) | ||
| 495 | 295 | ||
| 496 | #endif /* __KERNEL__ */ | 296 | #endif /* __KERNEL__ */ |
| 497 | 297 | ||
| 498 | /* | 298 | #include <asm-generic/bitops/fls.h> |
| 499 | * fls: find last bit set. | 299 | #include <asm-generic/bitops/fls64.h> |
| 500 | */ | ||
| 501 | #define fls(x) generic_fls(x) | ||
| 502 | #define fls64(x) generic_fls64(x) | ||
| 503 | 300 | ||
| 504 | #endif /* _M68KNOMMU_BITOPS_H */ | 301 | #endif /* _M68KNOMMU_BITOPS_H */ |
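Note on the m68knommu change above: the hand-rolled fls()/fls64() definitions are dropped in favour of <asm-generic/bitops/fls.h> and <asm-generic/bitops/fls64.h>. As a rough, hedged sketch of what the generic 64-bit fallback amounts to (built on the 32-bit fls(); the function name below is illustrative, not the header's own):

/* Sketch only: generic fls64() is expected to reduce to two fls() calls. */
static inline int fls64_sketch(unsigned long long x)
{
	unsigned int high = x >> 32;

	if (high)
		return fls(high) + 32;		/* a set bit sits in the upper word */
	return fls((unsigned int)x);		/* else search the lower word; fls(0) == 0 */
}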
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index 8e802059fe67..a1728f8c0705 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h | |||
| @@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | /* | 107 | /* |
| 108 | * __set_bit - Set a bit in memory | ||
| 109 | * @nr: the bit to set | ||
| 110 | * @addr: the address to start counting from | ||
| 111 | * | ||
| 112 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
| 113 | * If it's called on the same region of memory simultaneously, the effect | ||
| 114 | * may be that only one operation succeeds. | ||
| 115 | */ | ||
| 116 | static inline void __set_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 117 | { | ||
| 118 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
| 119 | |||
| 120 | *m |= 1UL << (nr & SZLONG_MASK); | ||
| 121 | } | ||
| 122 | |||
| 123 | /* | ||
| 124 | * clear_bit - Clears a bit in memory | 108 | * clear_bit - Clears a bit in memory |
| 125 | * @nr: Bit to clear | 109 | * @nr: Bit to clear |
| 126 | * @addr: Address to start counting from | 110 | * @addr: Address to start counting from |
| @@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
| 169 | } | 153 | } |
| 170 | 154 | ||
| 171 | /* | 155 | /* |
| 172 | * __clear_bit - Clears a bit in memory | ||
| 173 | * @nr: Bit to clear | ||
| 174 | * @addr: Address to start counting from | ||
| 175 | * | ||
| 176 | * Unlike clear_bit(), this function is non-atomic and may be reordered. | ||
| 177 | * If it's called on the same region of memory simultaneously, the effect | ||
| 178 | * may be that only one operation succeeds. | ||
| 179 | */ | ||
| 180 | static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 181 | { | ||
| 182 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
| 183 | |||
| 184 | *m &= ~(1UL << (nr & SZLONG_MASK)); | ||
| 185 | } | ||
| 186 | |||
| 187 | /* | ||
| 188 | * change_bit - Toggle a bit in memory | 156 | * change_bit - Toggle a bit in memory |
| 189 | * @nr: Bit to change | 157 | * @nr: Bit to change |
| 190 | * @addr: Address to start counting from | 158 | * @addr: Address to start counting from |
| @@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
| 235 | } | 203 | } |
| 236 | 204 | ||
| 237 | /* | 205 | /* |
| 238 | * __change_bit - Toggle a bit in memory | ||
| 239 | * @nr: the bit to change | ||
| 240 | * @addr: the address to start counting from | ||
| 241 | * | ||
| 242 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
| 243 | * If it's called on the same region of memory simultaneously, the effect | ||
| 244 | * may be that only one operation succeeds. | ||
| 245 | */ | ||
| 246 | static inline void __change_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 247 | { | ||
| 248 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
| 249 | |||
| 250 | *m ^= 1UL << (nr & SZLONG_MASK); | ||
| 251 | } | ||
| 252 | |||
| 253 | /* | ||
| 254 | * test_and_set_bit - Set a bit and return its old value | 206 | * test_and_set_bit - Set a bit and return its old value |
| 255 | * @nr: Bit to set | 207 | * @nr: Bit to set |
| 256 | * @addr: Address to count from | 208 | * @addr: Address to count from |
| @@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr, | |||
| 321 | } | 273 | } |
| 322 | 274 | ||
| 323 | /* | 275 | /* |
| 324 | * __test_and_set_bit - Set a bit and return its old value | ||
| 325 | * @nr: Bit to set | ||
| 326 | * @addr: Address to count from | ||
| 327 | * | ||
| 328 | * This operation is non-atomic and can be reordered. | ||
| 329 | * If two examples of this operation race, one can appear to succeed | ||
| 330 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 331 | */ | ||
| 332 | static inline int __test_and_set_bit(unsigned long nr, | ||
| 333 | volatile unsigned long *addr) | ||
| 334 | { | ||
| 335 | volatile unsigned long *a = addr; | ||
| 336 | unsigned long mask; | ||
| 337 | int retval; | ||
| 338 | |||
| 339 | a += nr >> SZLONG_LOG; | ||
| 340 | mask = 1UL << (nr & SZLONG_MASK); | ||
| 341 | retval = (mask & *a) != 0; | ||
| 342 | *a |= mask; | ||
| 343 | |||
| 344 | return retval; | ||
| 345 | } | ||
| 346 | |||
| 347 | /* | ||
| 348 | * test_and_clear_bit - Clear a bit and return its old value | 276 | * test_and_clear_bit - Clear a bit and return its old value |
| 349 | * @nr: Bit to clear | 277 | * @nr: Bit to clear |
| 350 | * @addr: Address to count from | 278 | * @addr: Address to count from |
| @@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
| 417 | } | 345 | } |
| 418 | 346 | ||
| 419 | /* | 347 | /* |
| 420 | * __test_and_clear_bit - Clear a bit and return its old value | ||
| 421 | * @nr: Bit to clear | ||
| 422 | * @addr: Address to count from | ||
| 423 | * | ||
| 424 | * This operation is non-atomic and can be reordered. | ||
| 425 | * If two examples of this operation race, one can appear to succeed | ||
| 426 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 427 | */ | ||
| 428 | static inline int __test_and_clear_bit(unsigned long nr, | ||
| 429 | volatile unsigned long * addr) | ||
| 430 | { | ||
| 431 | volatile unsigned long *a = addr; | ||
| 432 | unsigned long mask; | ||
| 433 | int retval; | ||
| 434 | |||
| 435 | a += (nr >> SZLONG_LOG); | ||
| 436 | mask = 1UL << (nr & SZLONG_MASK); | ||
| 437 | retval = ((mask & *a) != 0); | ||
| 438 | *a &= ~mask; | ||
| 439 | |||
| 440 | return retval; | ||
| 441 | } | ||
| 442 | |||
| 443 | /* | ||
| 444 | * test_and_change_bit - Change a bit and return its old value | 348 | * test_and_change_bit - Change a bit and return its old value |
| 445 | * @nr: Bit to change | 349 | * @nr: Bit to change |
| 446 | * @addr: Address to count from | 350 | * @addr: Address to count from |
| @@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr, | |||
| 509 | } | 413 | } |
| 510 | } | 414 | } |
| 511 | 415 | ||
| 512 | /* | ||
| 513 | * __test_and_change_bit - Change a bit and return its old value | ||
| 514 | * @nr: Bit to change | ||
| 515 | * @addr: Address to count from | ||
| 516 | * | ||
| 517 | * This operation is non-atomic and can be reordered. | ||
| 518 | * If two examples of this operation race, one can appear to succeed | ||
| 519 | * but actually fail. You must protect multiple accesses with a lock. | ||
| 520 | */ | ||
| 521 | static inline int __test_and_change_bit(unsigned long nr, | ||
| 522 | volatile unsigned long *addr) | ||
| 523 | { | ||
| 524 | volatile unsigned long *a = addr; | ||
| 525 | unsigned long mask; | ||
| 526 | int retval; | ||
| 527 | |||
| 528 | a += (nr >> SZLONG_LOG); | ||
| 529 | mask = 1UL << (nr & SZLONG_MASK); | ||
| 530 | retval = ((mask & *a) != 0); | ||
| 531 | *a ^= mask; | ||
| 532 | |||
| 533 | return retval; | ||
| 534 | } | ||
| 535 | |||
| 536 | #undef __bi_flags | 416 | #undef __bi_flags |
| 537 | #undef __bi_local_irq_save | 417 | #undef __bi_local_irq_save |
| 538 | #undef __bi_local_irq_restore | 418 | #undef __bi_local_irq_restore |
| 539 | 419 | ||
| 540 | /* | 420 | #include <asm-generic/bitops/non-atomic.h> |
| 541 | * test_bit - Determine whether a bit is set | ||
| 542 | * @nr: bit number to test | ||
| 543 | * @addr: Address to start counting from | ||
| 544 | */ | ||
| 545 | static inline int test_bit(unsigned long nr, const volatile unsigned long *addr) | ||
| 546 | { | ||
| 547 | return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK)); | ||
| 548 | } | ||
| 549 | 421 | ||
| 550 | /* | 422 | /* |
| 551 | * Return the bit position (0..63) of the most significant 1 bit in a word | 423 | * Return the bit position (0..63) of the most significant 1 bit in a word |
| @@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x) | |||
| 580 | return 63 - lz; | 452 | return 63 - lz; |
| 581 | } | 453 | } |
| 582 | 454 | ||
| 455 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | ||
| 456 | |||
| 583 | /* | 457 | /* |
| 584 | * __ffs - find first bit in word. | 458 | * __ffs - find first bit in word. |
| 585 | * @word: The word to search | 459 | * @word: The word to search |
| @@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x) | |||
| 589 | */ | 463 | */ |
| 590 | static inline unsigned long __ffs(unsigned long word) | 464 | static inline unsigned long __ffs(unsigned long word) |
| 591 | { | 465 | { |
| 592 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | ||
| 593 | return __ilog2(word & -word); | 466 | return __ilog2(word & -word); |
| 594 | #else | ||
| 595 | int b = 0, s; | ||
| 596 | |||
| 597 | #ifdef CONFIG_32BIT | ||
| 598 | s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s; | ||
| 599 | s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s; | ||
| 600 | s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s; | ||
| 601 | s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s; | ||
| 602 | s = 1; if (word << 31 != 0) s = 0; b += s; | ||
| 603 | |||
| 604 | return b; | ||
| 605 | #endif | ||
| 606 | #ifdef CONFIG_64BIT | ||
| 607 | s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s; | ||
| 608 | s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s; | ||
| 609 | s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s; | ||
| 610 | s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s; | ||
| 611 | s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s; | ||
| 612 | s = 1; if (word << 63 != 0) s = 0; b += s; | ||
| 613 | |||
| 614 | return b; | ||
| 615 | #endif | ||
| 616 | #endif | ||
| 617 | } | 467 | } |
| 618 | 468 | ||
| 619 | /* | 469 | /* |
| @@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word) | |||
| 652 | */ | 502 | */ |
| 653 | static inline unsigned long fls(unsigned long word) | 503 | static inline unsigned long fls(unsigned long word) |
| 654 | { | 504 | { |
| 655 | #ifdef CONFIG_32BIT | ||
| 656 | #ifdef CONFIG_CPU_MIPS32 | 505 | #ifdef CONFIG_CPU_MIPS32 |
| 657 | __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); | 506 | __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); |
| 658 | 507 | ||
| 659 | return 32 - word; | 508 | return 32 - word; |
| 660 | #else | ||
| 661 | { | ||
| 662 | int r = 32, s; | ||
| 663 | |||
| 664 | if (word == 0) | ||
| 665 | return 0; | ||
| 666 | |||
| 667 | s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s; | ||
| 668 | s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s; | ||
| 669 | s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s; | ||
| 670 | s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s; | ||
| 671 | s = 1; if ((word & 0x80000000)) s = 0; r -= s; | ||
| 672 | |||
| 673 | return r; | ||
| 674 | } | ||
| 675 | #endif | 509 | #endif |
| 676 | #endif /* CONFIG_32BIT */ | ||
| 677 | 510 | ||
| 678 | #ifdef CONFIG_64BIT | ||
| 679 | #ifdef CONFIG_CPU_MIPS64 | 511 | #ifdef CONFIG_CPU_MIPS64 |
| 680 | |||
| 681 | __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); | 512 | __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); |
| 682 | 513 | ||
| 683 | return 64 - word; | 514 | return 64 - word; |
| 684 | #else | ||
| 685 | { | ||
| 686 | int r = 64, s; | ||
| 687 | |||
| 688 | if (word == 0) | ||
| 689 | return 0; | ||
| 690 | |||
| 691 | s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s; | ||
| 692 | s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s; | ||
| 693 | s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s; | ||
| 694 | s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s; | ||
| 695 | s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s; | ||
| 696 | s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s; | ||
| 697 | |||
| 698 | return r; | ||
| 699 | } | ||
| 700 | #endif | 515 | #endif |
| 701 | #endif /* CONFIG_64BIT */ | ||
| 702 | } | 516 | } |
| 703 | 517 | ||
| 704 | #define fls64(x) generic_fls64(x) | 518 | #else |
| 705 | |||
| 706 | /* | ||
| 707 | * find_next_zero_bit - find the first zero bit in a memory region | ||
| 708 | * @addr: The address to base the search on | ||
| 709 | * @offset: The bitnumber to start searching at | ||
| 710 | * @size: The maximum size to search | ||
| 711 | */ | ||
| 712 | static inline unsigned long find_next_zero_bit(const unsigned long *addr, | ||
| 713 | unsigned long size, unsigned long offset) | ||
| 714 | { | ||
| 715 | const unsigned long *p = addr + (offset >> SZLONG_LOG); | ||
| 716 | unsigned long result = offset & ~SZLONG_MASK; | ||
| 717 | unsigned long tmp; | ||
| 718 | |||
| 719 | if (offset >= size) | ||
| 720 | return size; | ||
| 721 | size -= result; | ||
| 722 | offset &= SZLONG_MASK; | ||
| 723 | if (offset) { | ||
| 724 | tmp = *(p++); | ||
| 725 | tmp |= ~0UL >> (_MIPS_SZLONG-offset); | ||
| 726 | if (size < _MIPS_SZLONG) | ||
| 727 | goto found_first; | ||
| 728 | if (~tmp) | ||
| 729 | goto found_middle; | ||
| 730 | size -= _MIPS_SZLONG; | ||
| 731 | result += _MIPS_SZLONG; | ||
| 732 | } | ||
| 733 | while (size & ~SZLONG_MASK) { | ||
| 734 | if (~(tmp = *(p++))) | ||
| 735 | goto found_middle; | ||
| 736 | result += _MIPS_SZLONG; | ||
| 737 | size -= _MIPS_SZLONG; | ||
| 738 | } | ||
| 739 | if (!size) | ||
| 740 | return result; | ||
| 741 | tmp = *p; | ||
| 742 | |||
| 743 | found_first: | ||
| 744 | tmp |= ~0UL << size; | ||
| 745 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 746 | return result + size; /* Nope. */ | ||
| 747 | found_middle: | ||
| 748 | return result + ffz(tmp); | ||
| 749 | } | ||
| 750 | 519 | ||
| 751 | #define find_first_zero_bit(addr, size) \ | 520 | #include <asm-generic/bitops/__ffs.h> |
| 752 | find_next_zero_bit((addr), (size), 0) | 521 | #include <asm-generic/bitops/ffs.h> |
| 522 | #include <asm-generic/bitops/ffz.h> | ||
| 523 | #include <asm-generic/bitops/fls.h> | ||
| 753 | 524 | ||
| 754 | /* | 525 | #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */ |
| 755 | * find_next_bit - find the next set bit in a memory region | ||
| 756 | * @addr: The address to base the search on | ||
| 757 | * @offset: The bitnumber to start searching at | ||
| 758 | * @size: The maximum size to search | ||
| 759 | */ | ||
| 760 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
| 761 | unsigned long size, unsigned long offset) | ||
| 762 | { | ||
| 763 | const unsigned long *p = addr + (offset >> SZLONG_LOG); | ||
| 764 | unsigned long result = offset & ~SZLONG_MASK; | ||
| 765 | unsigned long tmp; | ||
| 766 | |||
| 767 | if (offset >= size) | ||
| 768 | return size; | ||
| 769 | size -= result; | ||
| 770 | offset &= SZLONG_MASK; | ||
| 771 | if (offset) { | ||
| 772 | tmp = *(p++); | ||
| 773 | tmp &= ~0UL << offset; | ||
| 774 | if (size < _MIPS_SZLONG) | ||
| 775 | goto found_first; | ||
| 776 | if (tmp) | ||
| 777 | goto found_middle; | ||
| 778 | size -= _MIPS_SZLONG; | ||
| 779 | result += _MIPS_SZLONG; | ||
| 780 | } | ||
| 781 | while (size & ~SZLONG_MASK) { | ||
| 782 | if ((tmp = *(p++))) | ||
| 783 | goto found_middle; | ||
| 784 | result += _MIPS_SZLONG; | ||
| 785 | size -= _MIPS_SZLONG; | ||
| 786 | } | ||
| 787 | if (!size) | ||
| 788 | return result; | ||
| 789 | tmp = *p; | ||
| 790 | |||
| 791 | found_first: | ||
| 792 | tmp &= ~0UL >> (_MIPS_SZLONG - size); | ||
| 793 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 794 | return result + size; /* Nope. */ | ||
| 795 | found_middle: | ||
| 796 | return result + __ffs(tmp); | ||
| 797 | } | ||
| 798 | 526 | ||
| 799 | /* | 527 | #include <asm-generic/bitops/fls64.h> |
| 800 | * find_first_bit - find the first set bit in a memory region | 528 | #include <asm-generic/bitops/find.h> |
| 801 | * @addr: The address to start the search at | ||
| 802 | * @size: The maximum size to search | ||
| 803 | * | ||
| 804 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 805 | * containing a bit. | ||
| 806 | */ | ||
| 807 | #define find_first_bit(addr, size) \ | ||
| 808 | find_next_bit((addr), (size), 0) | ||
| 809 | 529 | ||
| 810 | #ifdef __KERNEL__ | 530 | #ifdef __KERNEL__ |
| 811 | 531 | ||
| 812 | /* | 532 | #include <asm-generic/bitops/sched.h> |
| 813 | * Every architecture must define this function. It's the fastest | 533 | #include <asm-generic/bitops/hweight.h> |
| 814 | * way of searching a 140-bit bitmap where the first 100 bits are | 534 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 815 | * unlikely to be set. It's guaranteed that at least one of the 140 | 535 | #include <asm-generic/bitops/ext2-atomic.h> |
| 816 | * bits is cleared. | 536 | #include <asm-generic/bitops/minix.h> |
| 817 | */ | ||
| 818 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 819 | { | ||
| 820 | #ifdef CONFIG_32BIT | ||
| 821 | if (unlikely(b[0])) | ||
| 822 | return __ffs(b[0]); | ||
| 823 | if (unlikely(b[1])) | ||
| 824 | return __ffs(b[1]) + 32; | ||
| 825 | if (unlikely(b[2])) | ||
| 826 | return __ffs(b[2]) + 64; | ||
| 827 | if (b[3]) | ||
| 828 | return __ffs(b[3]) + 96; | ||
| 829 | return __ffs(b[4]) + 128; | ||
| 830 | #endif | ||
| 831 | #ifdef CONFIG_64BIT | ||
| 832 | if (unlikely(b[0])) | ||
| 833 | return __ffs(b[0]); | ||
| 834 | if (unlikely(b[1])) | ||
| 835 | return __ffs(b[1]) + 64; | ||
| 836 | return __ffs(b[2]) + 128; | ||
| 837 | #endif | ||
| 838 | } | ||
| 839 | |||
| 840 | /* | ||
| 841 | * hweightN - returns the hamming weight of a N-bit word | ||
| 842 | * @x: the word to weigh | ||
| 843 | * | ||
| 844 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 845 | */ | ||
| 846 | |||
| 847 | #define hweight64(x) generic_hweight64(x) | ||
| 848 | #define hweight32(x) generic_hweight32(x) | ||
| 849 | #define hweight16(x) generic_hweight16(x) | ||
| 850 | #define hweight8(x) generic_hweight8(x) | ||
| 851 | |||
| 852 | static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr) | ||
| 853 | { | ||
| 854 | unsigned char *ADDR = (unsigned char *) addr; | ||
| 855 | int mask, retval; | ||
| 856 | |||
| 857 | ADDR += nr >> 3; | ||
| 858 | mask = 1 << (nr & 0x07); | ||
| 859 | retval = (mask & *ADDR) != 0; | ||
| 860 | *ADDR |= mask; | ||
| 861 | |||
| 862 | return retval; | ||
| 863 | } | ||
| 864 | |||
| 865 | static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr) | ||
| 866 | { | ||
| 867 | unsigned char *ADDR = (unsigned char *) addr; | ||
| 868 | int mask, retval; | ||
| 869 | |||
| 870 | ADDR += nr >> 3; | ||
| 871 | mask = 1 << (nr & 0x07); | ||
| 872 | retval = (mask & *ADDR) != 0; | ||
| 873 | *ADDR &= ~mask; | ||
| 874 | |||
| 875 | return retval; | ||
| 876 | } | ||
| 877 | |||
| 878 | static inline int test_le_bit(unsigned long nr, const unsigned long * addr) | ||
| 879 | { | ||
| 880 | const unsigned char *ADDR = (const unsigned char *) addr; | ||
| 881 | int mask; | ||
| 882 | |||
| 883 | ADDR += nr >> 3; | ||
| 884 | mask = 1 << (nr & 0x07); | ||
| 885 | |||
| 886 | return ((mask & *ADDR) != 0); | ||
| 887 | } | ||
| 888 | |||
| 889 | static inline unsigned long find_next_zero_le_bit(unsigned long *addr, | ||
| 890 | unsigned long size, unsigned long offset) | ||
| 891 | { | ||
| 892 | unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG); | ||
| 893 | unsigned long result = offset & ~SZLONG_MASK; | ||
| 894 | unsigned long tmp; | ||
| 895 | |||
| 896 | if (offset >= size) | ||
| 897 | return size; | ||
| 898 | size -= result; | ||
| 899 | offset &= SZLONG_MASK; | ||
| 900 | if (offset) { | ||
| 901 | tmp = cpu_to_lelongp(p++); | ||
| 902 | tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */ | ||
| 903 | if (size < _MIPS_SZLONG) | ||
| 904 | goto found_first; | ||
| 905 | if (~tmp) | ||
| 906 | goto found_middle; | ||
| 907 | size -= _MIPS_SZLONG; | ||
| 908 | result += _MIPS_SZLONG; | ||
| 909 | } | ||
| 910 | while (size & ~SZLONG_MASK) { | ||
| 911 | if (~(tmp = cpu_to_lelongp(p++))) | ||
| 912 | goto found_middle; | ||
| 913 | result += _MIPS_SZLONG; | ||
| 914 | size -= _MIPS_SZLONG; | ||
| 915 | } | ||
| 916 | if (!size) | ||
| 917 | return result; | ||
| 918 | tmp = cpu_to_lelongp(p); | ||
| 919 | |||
| 920 | found_first: | ||
| 921 | tmp |= ~0UL << size; | ||
| 922 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 923 | return result + size; /* Nope. */ | ||
| 924 | |||
| 925 | found_middle: | ||
| 926 | return result + ffz(tmp); | ||
| 927 | } | ||
| 928 | |||
| 929 | #define find_first_zero_le_bit(addr, size) \ | ||
| 930 | find_next_zero_le_bit((addr), (size), 0) | ||
| 931 | |||
| 932 | #define ext2_set_bit(nr,addr) \ | ||
| 933 | __test_and_set_le_bit((nr),(unsigned long*)addr) | ||
| 934 | #define ext2_clear_bit(nr, addr) \ | ||
| 935 | __test_and_clear_le_bit((nr),(unsigned long*)addr) | ||
| 936 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 937 | ({ \ | ||
| 938 | int ret; \ | ||
| 939 | spin_lock(lock); \ | ||
| 940 | ret = ext2_set_bit((nr), (addr)); \ | ||
| 941 | spin_unlock(lock); \ | ||
| 942 | ret; \ | ||
| 943 | }) | ||
| 944 | |||
| 945 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 946 | ({ \ | ||
| 947 | int ret; \ | ||
| 948 | spin_lock(lock); \ | ||
| 949 | ret = ext2_clear_bit((nr), (addr)); \ | ||
| 950 | spin_unlock(lock); \ | ||
| 951 | ret; \ | ||
| 952 | }) | ||
| 953 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
| 954 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 955 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
| 956 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 957 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
| 958 | |||
| 959 | /* | ||
| 960 | * Bitmap functions for the minix filesystem. | ||
| 961 | * | ||
| 962 | * FIXME: These assume that Minix uses the native byte/bitorder. | ||
| 963 | * This limits the Minix filesystem's value for data exchange very much. | ||
| 964 | */ | ||
| 965 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 966 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 967 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 968 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 969 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 970 | 537 | ||
| 971 | #endif /* __KERNEL__ */ | 538 | #endif /* __KERNEL__ */ |
| 972 | 539 | ||
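The MIPS conversion above removes the open-coded non-atomic helpers (__set_bit, __clear_bit, __change_bit, the __test_and_* variants and test_bit) and pulls them from <asm-generic/bitops/non-atomic.h> instead. A minimal sketch of the pattern that header provides, so it is clear the removed MIPS versions had the same word-index-plus-mask behaviour (the _sketch names are illustrative):

/* Sketch: non-atomic read-modify-write, no locking and no barriers. */
static inline void set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;

	*p |= mask;
}

static inline int test_and_set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long mask = 1UL << (nr % BITS_PER_LONG);
	unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;		/* previous value of the bit */
}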
diff --git a/include/asm-mips/compat.h b/include/asm-mips/compat.h index 0012bd804d2d..986511db54a6 100644 --- a/include/asm-mips/compat.h +++ b/include/asm-mips/compat.h | |||
| @@ -133,6 +133,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) | |||
| 133 | return (void __user *)(long)uptr; | 133 | return (void __user *)(long)uptr; |
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
| 137 | { | ||
| 138 | return (u32)(unsigned long)uptr; | ||
| 139 | } | ||
| 140 | |||
| 136 | static inline void __user *compat_alloc_user_space(long len) | 141 | static inline void __user *compat_alloc_user_space(long len) |
| 137 | { | 142 | { |
| 138 | struct pt_regs *regs = (struct pt_regs *) | 143 | struct pt_regs *regs = (struct pt_regs *) |
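The new ptr_to_compat() above is the inverse of compat_ptr(): it narrows a kernel-side user pointer to the 32-bit handle stored in compat (32-bit userland) structures. A hedged usage sketch; the variable names are assumptions made for illustration:

/* Sketch: round-tripping a user pointer through the compat representation. */
static void compat_ptr_roundtrip(void __user *uptr)
{
	compat_uptr_t handle = ptr_to_compat(uptr);	/* pointer -> 32-bit handle */
	void __user *back = compat_ptr(handle);		/* 32-bit handle -> pointer */

	/* 'back' equals 'uptr' whenever the pointer fits in 32 bits. */
	(void)back;
}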
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h index 2454c44a8f54..a554089991f2 100644 --- a/include/asm-mips/futex.h +++ b/include/asm-mips/futex.h | |||
| @@ -99,5 +99,11 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
| 99 | return ret; | 99 | return ret; |
| 100 | } | 100 | } |
| 101 | 101 | ||
| 102 | static inline int | ||
| 103 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 104 | { | ||
| 105 | return -ENOSYS; | ||
| 106 | } | ||
| 107 | |||
| 102 | #endif | 108 | #endif |
| 103 | #endif | 109 | #endif |
diff --git a/include/asm-mips/mmzone.h b/include/asm-mips/mmzone.h index 011caebac369..7bde4432092b 100644 --- a/include/asm-mips/mmzone.h +++ b/include/asm-mips/mmzone.h | |||
| @@ -22,20 +22,6 @@ | |||
| 22 | NODE_DATA(__n)->node_spanned_pages) : 0);\ | 22 | NODE_DATA(__n)->node_spanned_pages) : 0);\ |
| 23 | }) | 23 | }) |
| 24 | 24 | ||
| 25 | #define pfn_to_page(pfn) \ | ||
| 26 | ({ \ | ||
| 27 | unsigned long __pfn = (pfn); \ | ||
| 28 | pg_data_t *__pg = NODE_DATA(pfn_to_nid(__pfn)); \ | ||
| 29 | __pg->node_mem_map + (__pfn - __pg->node_start_pfn); \ | ||
| 30 | }) | ||
| 31 | |||
| 32 | #define page_to_pfn(p) \ | ||
| 33 | ({ \ | ||
| 34 | struct page *__p = (p); \ | ||
| 35 | struct zone *__z = page_zone(__p); \ | ||
| 36 | ((__p - __z->zone_mem_map) + __z->zone_start_pfn); \ | ||
| 37 | }) | ||
| 38 | |||
| 39 | /* XXX: FIXME -- wli */ | 25 | /* XXX: FIXME -- wli */ |
| 40 | #define kern_addr_valid(addr) (0) | 26 | #define kern_addr_valid(addr) (0) |
| 41 | 27 | ||
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h index ee25a779bf49..a1eab136ff6c 100644 --- a/include/asm-mips/page.h +++ b/include/asm-mips/page.h | |||
| @@ -140,8 +140,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 140 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 140 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
| 141 | 141 | ||
| 142 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 142 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
| 143 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 144 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 145 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 143 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 146 | #endif | 144 | #endif |
| 147 | 145 | ||
| @@ -160,6 +158,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 160 | #define WANT_PAGE_VIRTUAL | 158 | #define WANT_PAGE_VIRTUAL |
| 161 | #endif | 159 | #endif |
| 162 | 160 | ||
| 161 | #include <asm-generic/memory_model.h> | ||
| 163 | #include <asm-generic/page.h> | 162 | #include <asm-generic/page.h> |
| 164 | 163 | ||
| 165 | #endif /* _ASM_PAGE_H */ | 164 | #endif /* _ASM_PAGE_H */ |
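With the per-architecture pfn_to_page()/page_to_pfn() macros deleted, <asm-generic/memory_model.h> now supplies them centrally. For the flat-memory case they are expected to boil down to an offset into mem_map, roughly as sketched below (ARCH_PFN_OFFSET defaults to 0 when an architecture does not define it; the sketch_ macros are illustrative only):

/* Sketch of the FLATMEM mapping centralized by the generic header. */
#ifndef ARCH_PFN_OFFSET
#define ARCH_PFN_OFFSET		(0UL)
#endif
#define sketch_pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define sketch_page_to_pfn(page) \
	((unsigned long)((page) - mem_map) + ARCH_PFN_OFFSET)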
diff --git a/include/asm-mips/time.h b/include/asm-mips/time.h index 9cc3564cc2c9..d897c8bb554d 100644 --- a/include/asm-mips/time.h +++ b/include/asm-mips/time.h | |||
| @@ -26,14 +26,14 @@ extern spinlock_t rtc_lock; | |||
| 26 | 26 | ||
| 27 | /* | 27 | /* |
| 28 | * RTC ops. By default, they point to no-RTC functions. | 28 | * RTC ops. By default, they point to no-RTC functions. |
| 29 | * rtc_get_time - mktime(year, mon, day, hour, min, sec) in seconds. | 29 | * rtc_mips_get_time - mktime(year, mon, day, hour, min, sec) in seconds. |
| 30 | * rtc_set_time - reverse the above translation and set time to RTC. | 30 | * rtc_mips_set_time - reverse the above translation and set time to RTC. |
| 31 | * rtc_set_mmss - similar to rtc_set_time, but only min and sec need | 31 | * rtc_mips_set_mmss - similar to rtc_set_time, but only min and sec need |
| 32 | * to be set. Used by RTC sync-up. | 32 | * to be set. Used by RTC sync-up. |
| 33 | */ | 33 | */ |
| 34 | extern unsigned long (*rtc_get_time)(void); | 34 | extern unsigned long (*rtc_mips_get_time)(void); |
| 35 | extern int (*rtc_set_time)(unsigned long); | 35 | extern int (*rtc_mips_set_time)(unsigned long); |
| 36 | extern int (*rtc_set_mmss)(unsigned long); | 36 | extern int (*rtc_mips_set_mmss)(unsigned long); |
| 37 | 37 | ||
| 38 | /* | 38 | /* |
| 39 | * Timer interrupt functions. | 39 | * Timer interrupt functions. |
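The RTC hooks above are renamed with an rtc_mips_ prefix; they remain plain function pointers that platform setup code fills in. A hedged sketch of how a board might install its routines after the rename (the my_board_* helpers are hypothetical):

/* Sketch: wiring a board's RTC routines into the renamed hooks. */
extern unsigned long my_board_rtc_read(void);		/* hypothetical */
extern int my_board_rtc_write(unsigned long sec);	/* hypothetical */

static void __init my_board_time_setup(void)
{
	rtc_mips_get_time = my_board_rtc_read;
	rtc_mips_set_time = my_board_rtc_write;
}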
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h index 421b3aea14cc..cd2813d8e136 100644 --- a/include/asm-mips/types.h +++ b/include/asm-mips/types.h | |||
| @@ -99,6 +99,11 @@ typedef u64 sector_t; | |||
| 99 | #define HAVE_SECTOR_T | 99 | #define HAVE_SECTOR_T |
| 100 | #endif | 100 | #endif |
| 101 | 101 | ||
| 102 | #ifdef CONFIG_LSF | ||
| 103 | typedef u64 blkcnt_t; | ||
| 104 | #define HAVE_BLKCNT_T | ||
| 105 | #endif | ||
| 106 | |||
| 102 | #endif /* __ASSEMBLY__ */ | 107 | #endif /* __ASSEMBLY__ */ |
| 103 | 108 | ||
| 104 | #endif /* __KERNEL__ */ | 109 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 15d8c2b51584..900561922c4c 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
| @@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr) | |||
| 35 | _atomic_spin_unlock_irqrestore(addr, flags); | 35 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 39 | { | ||
| 40 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
| 41 | |||
| 42 | *m |= 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 43 | } | ||
| 44 | |||
| 45 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 38 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
| 46 | { | 39 | { |
| 47 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); | 40 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); |
| @@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | |||
| 53 | _atomic_spin_unlock_irqrestore(addr, flags); | 46 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 54 | } | 47 | } |
| 55 | 48 | ||
| 56 | static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 57 | { | ||
| 58 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
| 59 | |||
| 60 | *m &= ~(1UL << CHOP_SHIFTCOUNT(nr)); | ||
| 61 | } | ||
| 62 | |||
| 63 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 49 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
| 64 | { | 50 | { |
| 65 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 51 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| @@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr) | |||
| 71 | _atomic_spin_unlock_irqrestore(addr, flags); | 57 | _atomic_spin_unlock_irqrestore(addr, flags); |
| 72 | } | 58 | } |
| 73 | 59 | ||
| 74 | static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr) | ||
| 75 | { | ||
| 76 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
| 77 | |||
| 78 | *m ^= 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 79 | } | ||
| 80 | |||
| 81 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 60 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
| 82 | { | 61 | { |
| 83 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 62 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| @@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | |||
| 93 | return (oldbit & mask) ? 1 : 0; | 72 | return (oldbit & mask) ? 1 : 0; |
| 94 | } | 73 | } |
| 95 | 74 | ||
| 96 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address) | ||
| 97 | { | ||
| 98 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 99 | unsigned long oldbit; | ||
| 100 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
| 101 | |||
| 102 | oldbit = *addr; | ||
| 103 | *addr = oldbit | mask; | ||
| 104 | |||
| 105 | return (oldbit & mask) ? 1 : 0; | ||
| 106 | } | ||
| 107 | |||
| 108 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 75 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
| 109 | { | 76 | { |
| 110 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 77 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| @@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | |||
| 120 | return (oldbit & mask) ? 1 : 0; | 87 | return (oldbit & mask) ? 1 : 0; |
| 121 | } | 88 | } |
| 122 | 89 | ||
| 123 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address) | ||
| 124 | { | ||
| 125 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 126 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
| 127 | unsigned long oldbit; | ||
| 128 | |||
| 129 | oldbit = *addr; | ||
| 130 | *addr = oldbit & ~mask; | ||
| 131 | |||
| 132 | return (oldbit & mask) ? 1 : 0; | ||
| 133 | } | ||
| 134 | |||
| 135 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 90 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
| 136 | { | 91 | { |
| 137 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 92 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
| @@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | |||
| 147 | return (oldbit & mask) ? 1 : 0; | 102 | return (oldbit & mask) ? 1 : 0; |
| 148 | } | 103 | } |
| 149 | 104 | ||
| 150 | static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address) | 105 | #include <asm-generic/bitops/non-atomic.h> |
| 151 | { | ||
| 152 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 153 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
| 154 | unsigned long oldbit; | ||
| 155 | |||
| 156 | oldbit = *addr; | ||
| 157 | *addr = oldbit ^ mask; | ||
| 158 | |||
| 159 | return (oldbit & mask) ? 1 : 0; | ||
| 160 | } | ||
| 161 | |||
| 162 | static __inline__ int test_bit(int nr, const volatile unsigned long *address) | ||
| 163 | { | ||
| 164 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
| 165 | const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
| 166 | |||
| 167 | return !!(*addr & mask); | ||
| 168 | } | ||
| 169 | 106 | ||
| 170 | #ifdef __KERNEL__ | 107 | #ifdef __KERNEL__ |
| 171 | 108 | ||
| @@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x) | |||
| 219 | return ret; | 156 | return ret; |
| 220 | } | 157 | } |
| 221 | 158 | ||
| 222 | /* Undefined if no bit is zero. */ | 159 | #include <asm-generic/bitops/ffz.h> |
| 223 | #define ffz(x) __ffs(~x) | ||
| 224 | 160 | ||
| 225 | /* | 161 | /* |
| 226 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) | 162 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) |
| @@ -263,155 +199,22 @@ static __inline__ int fls(int x) | |||
| 263 | 199 | ||
| 264 | return ret; | 200 | return ret; |
| 265 | } | 201 | } |
| 266 | #define fls64(x) generic_fls64(x) | ||
| 267 | 202 | ||
| 268 | /* | 203 | #include <asm-generic/bitops/fls64.h> |
| 269 | * hweightN: returns the hamming weight (i.e. the number | 204 | #include <asm-generic/bitops/hweight.h> |
| 270 | * of bits set) of a N-bit word | 205 | #include <asm-generic/bitops/sched.h> |
| 271 | */ | ||
| 272 | #define hweight64(x) generic_hweight64(x) | ||
| 273 | #define hweight32(x) generic_hweight32(x) | ||
| 274 | #define hweight16(x) generic_hweight16(x) | ||
| 275 | #define hweight8(x) generic_hweight8(x) | ||
| 276 | |||
| 277 | /* | ||
| 278 | * Every architecture must define this function. It's the fastest | ||
| 279 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 280 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 281 | * bits is cleared. | ||
| 282 | */ | ||
| 283 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 284 | { | ||
| 285 | #ifdef __LP64__ | ||
| 286 | if (unlikely(b[0])) | ||
| 287 | return __ffs(b[0]); | ||
| 288 | if (unlikely(b[1])) | ||
| 289 | return __ffs(b[1]) + 64; | ||
| 290 | return __ffs(b[2]) + 128; | ||
| 291 | #else | ||
| 292 | if (unlikely(b[0])) | ||
| 293 | return __ffs(b[0]); | ||
| 294 | if (unlikely(b[1])) | ||
| 295 | return __ffs(b[1]) + 32; | ||
| 296 | if (unlikely(b[2])) | ||
| 297 | return __ffs(b[2]) + 64; | ||
| 298 | if (b[3]) | ||
| 299 | return __ffs(b[3]) + 96; | ||
| 300 | return __ffs(b[4]) + 128; | ||
| 301 | #endif | ||
| 302 | } | ||
| 303 | 206 | ||
| 304 | #endif /* __KERNEL__ */ | 207 | #endif /* __KERNEL__ */ |
| 305 | 208 | ||
| 306 | /* | 209 | #include <asm-generic/bitops/find.h> |
| 307 | * This implementation of find_{first,next}_zero_bit was stolen from | ||
| 308 | * Linus' asm-alpha/bitops.h. | ||
| 309 | */ | ||
| 310 | #define find_first_zero_bit(addr, size) \ | ||
| 311 | find_next_zero_bit((addr), (size), 0) | ||
| 312 | |||
| 313 | static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset) | ||
| 314 | { | ||
| 315 | const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG); | ||
| 316 | unsigned long result = offset & ~(BITS_PER_LONG-1); | ||
| 317 | unsigned long tmp; | ||
| 318 | |||
| 319 | if (offset >= size) | ||
| 320 | return size; | ||
| 321 | size -= result; | ||
| 322 | offset &= (BITS_PER_LONG-1); | ||
| 323 | if (offset) { | ||
| 324 | tmp = *(p++); | ||
| 325 | tmp |= ~0UL >> (BITS_PER_LONG-offset); | ||
| 326 | if (size < BITS_PER_LONG) | ||
| 327 | goto found_first; | ||
| 328 | if (~tmp) | ||
| 329 | goto found_middle; | ||
| 330 | size -= BITS_PER_LONG; | ||
| 331 | result += BITS_PER_LONG; | ||
| 332 | } | ||
| 333 | while (size & ~(BITS_PER_LONG -1)) { | ||
| 334 | if (~(tmp = *(p++))) | ||
| 335 | goto found_middle; | ||
| 336 | result += BITS_PER_LONG; | ||
| 337 | size -= BITS_PER_LONG; | ||
| 338 | } | ||
| 339 | if (!size) | ||
| 340 | return result; | ||
| 341 | tmp = *p; | ||
| 342 | found_first: | ||
| 343 | tmp |= ~0UL << size; | ||
| 344 | found_middle: | ||
| 345 | return result + ffz(tmp); | ||
| 346 | } | ||
| 347 | |||
| 348 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) | ||
| 349 | { | ||
| 350 | const unsigned long *p = addr + (offset >> SHIFT_PER_LONG); | ||
| 351 | unsigned long result = offset & ~(BITS_PER_LONG-1); | ||
| 352 | unsigned long tmp; | ||
| 353 | |||
| 354 | if (offset >= size) | ||
| 355 | return size; | ||
| 356 | size -= result; | ||
| 357 | offset &= (BITS_PER_LONG-1); | ||
| 358 | if (offset) { | ||
| 359 | tmp = *(p++); | ||
| 360 | tmp &= (~0UL << offset); | ||
| 361 | if (size < BITS_PER_LONG) | ||
| 362 | goto found_first; | ||
| 363 | if (tmp) | ||
| 364 | goto found_middle; | ||
| 365 | size -= BITS_PER_LONG; | ||
| 366 | result += BITS_PER_LONG; | ||
| 367 | } | ||
| 368 | while (size & ~(BITS_PER_LONG-1)) { | ||
| 369 | if ((tmp = *(p++))) | ||
| 370 | goto found_middle; | ||
| 371 | result += BITS_PER_LONG; | ||
| 372 | size -= BITS_PER_LONG; | ||
| 373 | } | ||
| 374 | if (!size) | ||
| 375 | return result; | ||
| 376 | tmp = *p; | ||
| 377 | |||
| 378 | found_first: | ||
| 379 | tmp &= (~0UL >> (BITS_PER_LONG - size)); | ||
| 380 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 381 | return result + size; /* Nope. */ | ||
| 382 | found_middle: | ||
| 383 | return result + __ffs(tmp); | ||
| 384 | } | ||
| 385 | |||
| 386 | /** | ||
| 387 | * find_first_bit - find the first set bit in a memory region | ||
| 388 | * @addr: The address to start the search at | ||
| 389 | * @size: The maximum size to search | ||
| 390 | * | ||
| 391 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 392 | * containing a bit. | ||
| 393 | */ | ||
| 394 | #define find_first_bit(addr, size) \ | ||
| 395 | find_next_bit((addr), (size), 0) | ||
| 396 | |||
| 397 | #define _EXT2_HAVE_ASM_BITOPS_ | ||
| 398 | 210 | ||
| 399 | #ifdef __KERNEL__ | 211 | #ifdef __KERNEL__ |
| 400 | /* | 212 | |
| 401 | * test_and_{set,clear}_bit guarantee atomicity without | 213 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 402 | * disabling interrupts. | ||
| 403 | */ | ||
| 404 | 214 | ||
| 405 | /* '3' is bits per byte */ | 215 | /* '3' is bits per byte */ |
| 406 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) | 216 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) |
| 407 | 217 | ||
| 408 | #define ext2_test_bit(nr, addr) \ | ||
| 409 | test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
| 410 | #define ext2_set_bit(nr, addr) \ | ||
| 411 | __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
| 412 | #define ext2_clear_bit(nr, addr) \ | ||
| 413 | __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
| 414 | |||
| 415 | #define ext2_set_bit_atomic(l,nr,addr) \ | 218 | #define ext2_set_bit_atomic(l,nr,addr) \ |
| 416 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 219 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
| 417 | #define ext2_clear_bit_atomic(l,nr,addr) \ | 220 | #define ext2_clear_bit_atomic(l,nr,addr) \ |
| @@ -419,77 +222,6 @@ found_middle: | |||
| 419 | 222 | ||
| 420 | #endif /* __KERNEL__ */ | 223 | #endif /* __KERNEL__ */ |
| 421 | 224 | ||
| 422 | | 225 | #include <asm-generic/bitops/minix-le.h> |
| 423 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 424 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 425 | |||
| 426 | /* include/linux/byteorder does not support "unsigned long" type */ | ||
| 427 | static inline unsigned long ext2_swabp(unsigned long * x) | ||
| 428 | { | ||
| 429 | #ifdef __LP64__ | ||
| 430 | return (unsigned long) __swab64p((u64 *) x); | ||
| 431 | #else | ||
| 432 | return (unsigned long) __swab32p((u32 *) x); | ||
| 433 | #endif | ||
| 434 | } | ||
| 435 | |||
| 436 | /* include/linux/byteorder doesn't support "unsigned long" type */ | ||
| 437 | static inline unsigned long ext2_swab(unsigned long y) | ||
| 438 | { | ||
| 439 | #ifdef __LP64__ | ||
| 440 | return (unsigned long) __swab64((u64) y); | ||
| 441 | #else | ||
| 442 | return (unsigned long) __swab32((u32) y); | ||
| 443 | #endif | ||
| 444 | } | ||
| 445 | |||
| 446 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
| 447 | { | ||
| 448 | unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG); | ||
| 449 | unsigned long result = offset & ~(BITS_PER_LONG - 1); | ||
| 450 | unsigned long tmp; | ||
| 451 | |||
| 452 | if (offset >= size) | ||
| 453 | return size; | ||
| 454 | size -= result; | ||
| 455 | offset &= (BITS_PER_LONG - 1UL); | ||
| 456 | if (offset) { | ||
| 457 | tmp = ext2_swabp(p++); | ||
| 458 | tmp |= (~0UL >> (BITS_PER_LONG - offset)); | ||
| 459 | if (size < BITS_PER_LONG) | ||
| 460 | goto found_first; | ||
| 461 | if (~tmp) | ||
| 462 | goto found_middle; | ||
| 463 | size -= BITS_PER_LONG; | ||
| 464 | result += BITS_PER_LONG; | ||
| 465 | } | ||
| 466 | |||
| 467 | while (size & ~(BITS_PER_LONG - 1)) { | ||
| 468 | if (~(tmp = *(p++))) | ||
| 469 | goto found_middle_swap; | ||
| 470 | result += BITS_PER_LONG; | ||
| 471 | size -= BITS_PER_LONG; | ||
| 472 | } | ||
| 473 | if (!size) | ||
| 474 | return result; | ||
| 475 | tmp = ext2_swabp(p); | ||
| 476 | found_first: | ||
| 477 | tmp |= ~0UL << size; | ||
| 478 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 479 | return result + size; /* Nope. Skip ffz */ | ||
| 480 | found_middle: | ||
| 481 | return result + ffz(tmp); | ||
| 482 | |||
| 483 | found_middle_swap: | ||
| 484 | return result + ffz(ext2_swab(tmp)); | ||
| 485 | } | ||
| 486 | |||
| 487 | |||
| 488 | /* Bitmap functions for the minix filesystem. */ | ||
| 489 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
| 490 | #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr)) | ||
| 491 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
| 492 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
| 493 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
| 494 | 226 | ||
| 495 | #endif /* _PARISC_BITOPS_H */ | 227 | #endif /* _PARISC_BITOPS_H */ |
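In the parisc file above, the non-atomic ext2_* and minix_* helpers now come from the generic ext2-non-atomic.h and minix-le.h headers; only the atomic ext2 variants remain, still built on the (nr) ^ LE_BYTE_ADDR trick. As a hedged illustration of that trick: on a big-endian host such as parisc, XORing the bit number with ((sizeof(long) - 1) << 3) mirrors the byte index inside one word, so bit 0 of an on-disk little-endian bitmap lands in the first byte as stored:

/* Illustration only (big-endian host assumed): map a little-endian
 * bitmap bit number onto the equivalent native bit number. */
static inline unsigned long le_bitnr_sketch(unsigned long nr)
{
	/* flips the byte index within the word; bit-within-byte is unchanged */
	return nr ^ ((sizeof(unsigned long) - 1) << 3);
}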
diff --git a/include/asm-parisc/compat.h b/include/asm-parisc/compat.h index 38b918feead9..289624d8b2d4 100644 --- a/include/asm-parisc/compat.h +++ b/include/asm-parisc/compat.h | |||
| @@ -138,6 +138,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) | |||
| 138 | return (void __user *)(unsigned long)uptr; | 138 | return (void __user *)(unsigned long)uptr; |
| 139 | } | 139 | } |
| 140 | 140 | ||
| 141 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
| 142 | { | ||
| 143 | return (u32)(unsigned long)uptr; | ||
| 144 | } | ||
| 145 | |||
| 141 | static __inline__ void __user *compat_alloc_user_space(long len) | 146 | static __inline__ void __user *compat_alloc_user_space(long len) |
| 142 | { | 147 | { |
| 143 | struct pt_regs *regs = ¤t->thread.regs; | 148 | struct pt_regs *regs = ¤t->thread.regs; |
diff --git a/include/asm-parisc/mmzone.h b/include/asm-parisc/mmzone.h index ae039f4fd711..ceb9b73199d1 100644 --- a/include/asm-parisc/mmzone.h +++ b/include/asm-parisc/mmzone.h | |||
| @@ -25,23 +25,6 @@ extern struct node_map_data node_data[]; | |||
| 25 | pg_data_t *__pgdat = NODE_DATA(nid); \ | 25 | pg_data_t *__pgdat = NODE_DATA(nid); \ |
| 26 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ | 26 | __pgdat->node_start_pfn + __pgdat->node_spanned_pages; \ |
| 27 | }) | 27 | }) |
| 28 | #define node_localnr(pfn, nid) ((pfn) - node_start_pfn(nid)) | ||
| 29 | |||
| 30 | #define pfn_to_page(pfn) \ | ||
| 31 | ({ \ | ||
| 32 | unsigned long __pfn = (pfn); \ | ||
| 33 | int __node = pfn_to_nid(__pfn); \ | ||
| 34 | &NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)]; \ | ||
| 35 | }) | ||
| 36 | |||
| 37 | #define page_to_pfn(pg) \ | ||
| 38 | ({ \ | ||
| 39 | struct page *__page = pg; \ | ||
| 40 | struct zone *__zone = page_zone(__page); \ | ||
| 41 | BUG_ON(__zone == NULL); \ | ||
| 42 | (unsigned long)(__page - __zone->zone_mem_map) \ | ||
| 43 | + __zone->zone_start_pfn; \ | ||
| 44 | }) | ||
| 45 | 28 | ||
| 46 | /* We have these possible memory map layouts: | 29 | /* We have these possible memory map layouts: |
| 47 | * Astro: 0-3.75, 67.75-68, 4-64 | 30 | * Astro: 0-3.75, 67.75-68, 4-64 |
diff --git a/include/asm-parisc/page.h b/include/asm-parisc/page.h index 4a6752b0afed..9f303c0c3cd7 100644 --- a/include/asm-parisc/page.h +++ b/include/asm-parisc/page.h | |||
| @@ -130,8 +130,6 @@ extern int npmem_ranges; | |||
| 130 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) | 130 | #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) |
| 131 | 131 | ||
| 132 | #ifndef CONFIG_DISCONTIGMEM | 132 | #ifndef CONFIG_DISCONTIGMEM |
| 133 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 134 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 135 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 133 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 136 | #endif /* CONFIG_DISCONTIGMEM */ | 134 | #endif /* CONFIG_DISCONTIGMEM */ |
| 137 | 135 | ||
| @@ -152,6 +150,7 @@ extern int npmem_ranges; | |||
| 152 | 150 | ||
| 153 | #endif /* __KERNEL__ */ | 151 | #endif /* __KERNEL__ */ |
| 154 | 152 | ||
| 153 | #include <asm-generic/memory_model.h> | ||
| 155 | #include <asm-generic/page.h> | 154 | #include <asm-generic/page.h> |
| 156 | 155 | ||
| 157 | #endif /* _PARISC_PAGE_H */ | 156 | #endif /* _PARISC_PAGE_H */ |
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index bf6941a810b8..d1c2a4405660 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h | |||
| @@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | |||
| 184 | : "cc"); | 184 | : "cc"); |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | /* Non-atomic versions */ | 187 | #include <asm-generic/bitops/non-atomic.h> |
| 188 | static __inline__ int test_bit(unsigned long nr, | ||
| 189 | __const__ volatile unsigned long *addr) | ||
| 190 | { | ||
| 191 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
| 192 | } | ||
| 193 | |||
| 194 | static __inline__ void __set_bit(unsigned long nr, | ||
| 195 | volatile unsigned long *addr) | ||
| 196 | { | ||
| 197 | unsigned long mask = BITOP_MASK(nr); | ||
| 198 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 199 | |||
| 200 | *p |= mask; | ||
| 201 | } | ||
| 202 | |||
| 203 | static __inline__ void __clear_bit(unsigned long nr, | ||
| 204 | volatile unsigned long *addr) | ||
| 205 | { | ||
| 206 | unsigned long mask = BITOP_MASK(nr); | ||
| 207 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 208 | |||
| 209 | *p &= ~mask; | ||
| 210 | } | ||
| 211 | |||
| 212 | static __inline__ void __change_bit(unsigned long nr, | ||
| 213 | volatile unsigned long *addr) | ||
| 214 | { | ||
| 215 | unsigned long mask = BITOP_MASK(nr); | ||
| 216 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 217 | |||
| 218 | *p ^= mask; | ||
| 219 | } | ||
| 220 | |||
| 221 | static __inline__ int __test_and_set_bit(unsigned long nr, | ||
| 222 | volatile unsigned long *addr) | ||
| 223 | { | ||
| 224 | unsigned long mask = BITOP_MASK(nr); | ||
| 225 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 226 | unsigned long old = *p; | ||
| 227 | |||
| 228 | *p = old | mask; | ||
| 229 | return (old & mask) != 0; | ||
| 230 | } | ||
| 231 | |||
| 232 | static __inline__ int __test_and_clear_bit(unsigned long nr, | ||
| 233 | volatile unsigned long *addr) | ||
| 234 | { | ||
| 235 | unsigned long mask = BITOP_MASK(nr); | ||
| 236 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 237 | unsigned long old = *p; | ||
| 238 | |||
| 239 | *p = old & ~mask; | ||
| 240 | return (old & mask) != 0; | ||
| 241 | } | ||
| 242 | |||
| 243 | static __inline__ int __test_and_change_bit(unsigned long nr, | ||
| 244 | volatile unsigned long *addr) | ||
| 245 | { | ||
| 246 | unsigned long mask = BITOP_MASK(nr); | ||
| 247 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
| 248 | unsigned long old = *p; | ||
| 249 | |||
| 250 | *p = old ^ mask; | ||
| 251 | return (old & mask) != 0; | ||
| 252 | } | ||
| 253 | 188 | ||
| 254 | /* | 189 | /* |
| 255 | * Return the zero-based bit position (LE, not IBM bit numbering) of | 190 | * Return the zero-based bit position (LE, not IBM bit numbering) of |
| @@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x) | |||
| 310 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | 245 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); |
| 311 | return 32 - lz; | 246 | return 32 - lz; |
| 312 | } | 247 | } |
| 313 | #define fls64(x) generic_fls64(x) | 248 | #include <asm-generic/bitops/fls64.h> |
| 314 | 249 | ||
| 315 | /* | 250 | #include <asm-generic/bitops/hweight.h> |
| 316 | * hweightN: returns the hamming weight (i.e. the number | ||
| 317 | * of bits set) of a N-bit word | ||
| 318 | */ | ||
| 319 | #define hweight64(x) generic_hweight64(x) | ||
| 320 | #define hweight32(x) generic_hweight32(x) | ||
| 321 | #define hweight16(x) generic_hweight16(x) | ||
| 322 | #define hweight8(x) generic_hweight8(x) | ||
| 323 | 251 | ||
| 324 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | 252 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
| 325 | unsigned long find_next_zero_bit(const unsigned long *addr, | 253 | unsigned long find_next_zero_bit(const unsigned long *addr, |
| @@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr, | |||
| 397 | #define minix_find_first_zero_bit(addr,size) \ | 325 | #define minix_find_first_zero_bit(addr,size) \ |
| 398 | find_first_zero_le_bit((unsigned long *)addr, size) | 326 | find_first_zero_le_bit((unsigned long *)addr, size) |
| 399 | 327 | ||
| 400 | /* | 328 | #include <asm-generic/bitops/sched.h> |
| 401 | * Every architecture must define this function. It's the fastest | ||
| 402 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 403 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 404 | * bits is cleared. | ||
| 405 | */ | ||
| 406 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 407 | { | ||
| 408 | #ifdef CONFIG_PPC64 | ||
| 409 | if (unlikely(b[0])) | ||
| 410 | return __ffs(b[0]); | ||
| 411 | if (unlikely(b[1])) | ||
| 412 | return __ffs(b[1]) + 64; | ||
| 413 | return __ffs(b[2]) + 128; | ||
| 414 | #else | ||
| 415 | if (unlikely(b[0])) | ||
| 416 | return __ffs(b[0]); | ||
| 417 | if (unlikely(b[1])) | ||
| 418 | return __ffs(b[1]) + 32; | ||
| 419 | if (unlikely(b[2])) | ||
| 420 | return __ffs(b[2]) + 64; | ||
| 421 | if (b[3]) | ||
| 422 | return __ffs(b[3]) + 96; | ||
| 423 | return __ffs(b[4]) + 128; | ||
| 424 | #endif | ||
| 425 | } | ||
| 426 | 329 | ||
| 427 | #endif /* __KERNEL__ */ | 330 | #endif /* __KERNEL__ */ |
| 428 | 331 | ||
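The powerpc header above likewise drops its generic_hweight*() and sched_find_first_bit() wrappers for the shared headers. The generic hweight code is a parallel (SWAR) population count; a hedged sketch of the 32-bit form, with an illustrative name:

/* Sketch: count set bits by summing progressively wider fields. */
static inline unsigned int hweight32_sketch(unsigned int w)
{
	unsigned int res;

	res = w - ((w >> 1) & 0x55555555);			/* 2-bit sums */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 4-bit sums */
	res = (res + (res >> 4)) & 0x0f0f0f0f;			/* byte sums */
	return (res * 0x01010101) >> 24;			/* total in top byte */
}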
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h index 39e85f320a76..f1b3c00bc1ce 100644 --- a/include/asm-powerpc/futex.h +++ b/include/asm-powerpc/futex.h | |||
| @@ -81,5 +81,11 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
| 81 | return ret; | 81 | return ret; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | static inline int | ||
| 85 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 86 | { | ||
| 87 | return -ENOSYS; | ||
| 88 | } | ||
| 89 | |||
| 84 | #endif /* __KERNEL__ */ | 90 | #endif /* __KERNEL__ */ |
| 85 | #endif /* _ASM_POWERPC_FUTEX_H */ | 91 | #endif /* _ASM_POWERPC_FUTEX_H */ |
diff --git a/include/asm-powerpc/kdebug.h b/include/asm-powerpc/kdebug.h index 7c16265568e0..c01786ab5fa6 100644 --- a/include/asm-powerpc/kdebug.h +++ b/include/asm-powerpc/kdebug.h | |||
| @@ -16,13 +16,9 @@ struct die_args { | |||
| 16 | int signr; | 16 | int signr; |
| 17 | }; | 17 | }; |
| 18 | 18 | ||
| 19 | /* | 19 | extern int register_die_notifier(struct notifier_block *); |
| 20 | Note - you should never unregister because that can race with NMIs. | 20 | extern int unregister_die_notifier(struct notifier_block *); |
| 21 | If you really want to do it first unregister - then synchronize_sched - | 21 | extern struct atomic_notifier_head powerpc_die_chain; |
| 22 | then free. | ||
| 23 | */ | ||
| 24 | int register_die_notifier(struct notifier_block *nb); | ||
| 25 | extern struct notifier_block *powerpc_die_chain; | ||
| 26 | 22 | ||
| 27 | /* Grossly misnamed. */ | 23 | /* Grossly misnamed. */ |
| 28 | enum die_val { | 24 | enum die_val { |
| @@ -37,7 +33,7 @@ enum die_val { | |||
| 37 | static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) | 33 | static inline int notify_die(enum die_val val,char *str,struct pt_regs *regs,long err,int trap, int sig) |
| 38 | { | 34 | { |
| 39 | struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; | 35 | struct die_args args = { .regs=regs, .str=str, .err=err, .trapnr=trap,.signr=sig }; |
| 40 | return notifier_call_chain(&powerpc_die_chain, val, &args); | 36 | return atomic_notifier_call_chain(&powerpc_die_chain, val, &args); |
| 41 | } | 37 | } |
| 42 | 38 | ||
| 43 | #endif /* __KERNEL__ */ | 39 | #endif /* __KERNEL__ */ |
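The kdebug.h change above converts the powerpc die chain to an atomic notifier head and exports an unregister helper alongside register_die_notifier(). A hedged sketch of a client of the chain; the my_die_* names are assumptions for illustration:

/* Sketch: registering and unregistering a die-chain handler. */
#include <linux/notifier.h>
#include <asm/kdebug.h>

static int my_die_handler(struct notifier_block *nb, unsigned long val, void *data)
{
	struct die_args *args = data;

	/* inspect args->regs, args->trapnr, args->signr as needed */
	return NOTIFY_DONE;
}

static struct notifier_block my_die_nb = {
	.notifier_call	= my_die_handler,
};

static int __init my_die_client_init(void)
{
	return register_die_notifier(&my_die_nb);
}

static void __exit my_die_client_exit(void)
{
	unregister_die_notifier(&my_die_nb);
}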
diff --git a/include/asm-powerpc/page.h b/include/asm-powerpc/page.h index 0b82df483f7f..2fbecebe1c92 100644 --- a/include/asm-powerpc/page.h +++ b/include/asm-powerpc/page.h | |||
| @@ -69,8 +69,6 @@ | |||
| 69 | #endif | 69 | #endif |
| 70 | 70 | ||
| 71 | #ifdef CONFIG_FLATMEM | 71 | #ifdef CONFIG_FLATMEM |
| 72 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 73 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 74 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 72 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 75 | #endif | 73 | #endif |
| 76 | 74 | ||
| @@ -200,6 +198,7 @@ extern void copy_user_page(void *to, void *from, unsigned long vaddr, | |||
| 200 | struct page *p); | 198 | struct page *p); |
| 201 | extern int page_is_ram(unsigned long pfn); | 199 | extern int page_is_ram(unsigned long pfn); |
| 202 | 200 | ||
| 201 | #include <asm-generic/memory_model.h> | ||
| 203 | #endif /* __ASSEMBLY__ */ | 202 | #endif /* __ASSEMBLY__ */ |
| 204 | 203 | ||
| 205 | #endif /* __KERNEL__ */ | 204 | #endif /* __KERNEL__ */ |
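Editor's note: the removed open-coded pfn_to_page()/page_to_pfn() now come from asm-generic/memory_model.h. For the flat memory model that header reduces to roughly the following (paraphrased sketch, not a verbatim quote):

        /* Flat model sketch: mem_map is one contiguous array of struct page,
         * indexed by pfn relative to ARCH_PFN_OFFSET (which defaults to 0). */
        #define __pfn_to_page(pfn)      (mem_map + ((pfn) - ARCH_PFN_OFFSET))
        #define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + \
                                         ARCH_PFN_OFFSET)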
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h index ec3c2ee8bf86..baabba96e313 100644 --- a/include/asm-powerpc/types.h +++ b/include/asm-powerpc/types.h | |||
| @@ -103,6 +103,11 @@ typedef u64 sector_t; | |||
| 103 | #define HAVE_SECTOR_T | 103 | #define HAVE_SECTOR_T |
| 104 | #endif | 104 | #endif |
| 105 | 105 | ||
| 106 | #ifdef CONFIG_LSF | ||
| 107 | typedef u64 blkcnt_t; | ||
| 108 | #define HAVE_BLKCNT_T | ||
| 109 | #endif | ||
| 110 | |||
| 106 | #endif /* __ASSEMBLY__ */ | 111 | #endif /* __ASSEMBLY__ */ |
| 107 | 112 | ||
| 108 | #endif /* __KERNEL__ */ | 113 | #endif /* __KERNEL__ */ |
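Editor's note: CONFIG_LSF (large single files) promotes the inode block count to 64 bits the same way CONFIG_LBD does for sector_t; architectures defining HAVE_BLKCNT_T here opt out of the generic fallback, which is essentially this pattern (sketch of the idea in <linux/types.h>, not a quote):

        /* Sketch: without HAVE_BLKCNT_T the block count stays word sized. */
        #ifndef HAVE_BLKCNT_T
        typedef unsigned long blkcnt_t;
        #endif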
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h index 538e0c8ab243..a70ba2ee552d 100644 --- a/include/asm-ppc/page.h +++ b/include/asm-ppc/page.h | |||
| @@ -149,8 +149,7 @@ extern int page_is_ram(unsigned long pfn); | |||
| 149 | #define __pa(x) ___pa((unsigned long)(x)) | 149 | #define __pa(x) ___pa((unsigned long)(x)) |
| 150 | #define __va(x) ((void *)(___va((unsigned long)(x)))) | 150 | #define __va(x) ((void *)(___va((unsigned long)(x)))) |
| 151 | 151 | ||
| 152 | #define pfn_to_page(pfn) (mem_map + ((pfn) - PPC_PGSTART)) | 152 | #define ARCH_PFN_OFFSET (PPC_PGSTART) |
| 153 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PPC_PGSTART) | ||
| 154 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 153 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 155 | #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) | 154 | #define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT) |
| 156 | 155 | ||
| @@ -175,5 +174,6 @@ extern __inline__ int get_order(unsigned long size) | |||
| 175 | /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ | 174 | /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ |
| 176 | #define __HAVE_ARCH_GATE_AREA 1 | 175 | #define __HAVE_ARCH_GATE_AREA 1 |
| 177 | 176 | ||
| 177 | #include <asm-generic/memory_model.h> | ||
| 178 | #endif /* __KERNEL__ */ | 178 | #endif /* __KERNEL__ */ |
| 179 | #endif /* _PPC_PAGE_H */ | 179 | #endif /* _PPC_PAGE_H */ |
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 3628899f48bb..ca092ffb7a95 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h | |||
| @@ -828,35 +828,12 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
| 828 | return find_first_bit(b, 140); | 828 | return find_first_bit(b, 140); |
| 829 | } | 829 | } |
| 830 | 830 | ||
| 831 | /* | 831 | #include <asm-generic/bitops/ffs.h> |
| 832 | * ffs: find first bit set. This is defined the same way as | ||
| 833 | * the libc and compiler builtin ffs routines, therefore | ||
| 834 | * differs in spirit from the above ffz (man ffs). | ||
| 835 | */ | ||
| 836 | #define ffs(x) generic_ffs(x) | ||
| 837 | 832 | ||
| 838 | /* | 833 | #include <asm-generic/bitops/fls.h> |
| 839 | * fls: find last bit set. | 834 | #include <asm-generic/bitops/fls64.h> |
| 840 | */ | ||
| 841 | #define fls(x) generic_fls(x) | ||
| 842 | #define fls64(x) generic_fls64(x) | ||
| 843 | |||
| 844 | /* | ||
| 845 | * hweightN: returns the hamming weight (i.e. the number | ||
| 846 | * of bits set) of a N-bit word | ||
| 847 | */ | ||
| 848 | #define hweight64(x) \ | ||
| 849 | ({ \ | ||
| 850 | unsigned long __x = (x); \ | ||
| 851 | unsigned int __w; \ | ||
| 852 | __w = generic_hweight32((unsigned int) __x); \ | ||
| 853 | __w += generic_hweight32((unsigned int) (__x>>32)); \ | ||
| 854 | __w; \ | ||
| 855 | }) | ||
| 856 | #define hweight32(x) generic_hweight32(x) | ||
| 857 | #define hweight16(x) generic_hweight16(x) | ||
| 858 | #define hweight8(x) generic_hweight8(x) | ||
| 859 | 835 | ||
| 836 | #include <asm-generic/bitops/hweight.h> | ||
| 860 | 837 | ||
| 861 | #ifdef __KERNEL__ | 838 | #ifdef __KERNEL__ |
| 862 | 839 | ||
| @@ -871,11 +848,11 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
| 871 | */ | 848 | */ |
| 872 | 849 | ||
| 873 | #define ext2_set_bit(nr, addr) \ | 850 | #define ext2_set_bit(nr, addr) \ |
| 874 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 851 | __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
| 875 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 852 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
| 876 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 853 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
| 877 | #define ext2_clear_bit(nr, addr) \ | 854 | #define ext2_clear_bit(nr, addr) \ |
| 878 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 855 | __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
| 879 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 856 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
| 880 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 857 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
| 881 | #define ext2_test_bit(nr, addr) \ | 858 | #define ext2_test_bit(nr, addr) \ |
| @@ -1011,18 +988,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) | |||
| 1011 | return offset + ext2_find_first_zero_bit(p, size); | 988 | return offset + ext2_find_first_zero_bit(p, size); |
| 1012 | } | 989 | } |
| 1013 | 990 | ||
| 1014 | /* Bitmap functions for the minix filesystem. */ | 991 | #include <asm-generic/bitops/minix.h> |
| 1015 | /* FIXME !!! */ | ||
| 1016 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 1017 | test_and_set_bit(nr,(unsigned long *)addr) | ||
| 1018 | #define minix_set_bit(nr,addr) \ | ||
| 1019 | set_bit(nr,(unsigned long *)addr) | ||
| 1020 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 1021 | test_and_clear_bit(nr,(unsigned long *)addr) | ||
| 1022 | #define minix_test_bit(nr,addr) \ | ||
| 1023 | test_bit(nr,(unsigned long *)addr) | ||
| 1024 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 1025 | find_first_zero_bit(addr,size) | ||
| 1026 | 992 | ||
| 1027 | #endif /* __KERNEL__ */ | 993 | #endif /* __KERNEL__ */ |
| 1028 | 994 | ||
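Editor's note: switching ext2_set_bit()/ext2_clear_bit() to the __-prefixed bitops drops the interlocked update for the non-atomic helpers, whose callers are expected to serialize on a lock already. The generic non-atomic variant is just a plain read-modify-write, roughly as below (a sketch in the spirit of asm-generic/bitops/non-atomic.h, not a copy of it):

        /* Sketch: non-atomic test-and-set, safe only under external locking. */
        static inline int __test_and_set_bit_sketch(int nr,
                                                    volatile unsigned long *addr)
        {
                unsigned long mask = 1UL << (nr % BITS_PER_LONG);
                unsigned long *p = ((unsigned long *)addr) + nr / BITS_PER_LONG;
                unsigned long old = *p;

                *p = old | mask;        /* no serialization, no barrier */
                return (old & mask) != 0;
        }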
diff --git a/include/asm-s390/compat.h b/include/asm-s390/compat.h index a007715f4aea..356a0b183539 100644 --- a/include/asm-s390/compat.h +++ b/include/asm-s390/compat.h | |||
| @@ -128,6 +128,11 @@ static inline void __user *compat_ptr(compat_uptr_t uptr) | |||
| 128 | return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); | 128 | return (void __user *)(unsigned long)(uptr & 0x7fffffffUL); |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | static inline compat_uptr_t ptr_to_compat(void __user *uptr) | ||
| 132 | { | ||
| 133 | return (u32)(unsigned long)uptr; | ||
| 134 | } | ||
| 135 | |||
| 131 | static inline void __user *compat_alloc_user_space(long len) | 136 | static inline void __user *compat_alloc_user_space(long len) |
| 132 | { | 137 | { |
| 133 | unsigned long stack; | 138 | unsigned long stack; |
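Editor's note: the new ptr_to_compat() is the inverse of the compat_ptr() shown just above it; a tiny round-trip sketch (function name is illustrative):

        /* Sketch: narrow a user pointer for a compat structure and back. */
        static inline int compat_ptr_roundtrip(void __user *uptr)
        {
                compat_uptr_t cptr = ptr_to_compat(uptr);  /* store as u32 */
                void __user *back  = compat_ptr(cptr);     /* masks to 31 bits */

                return back == uptr;    /* true for any 31-bit user address */
        }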
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h index 2430c561e021..3b1138ac7e79 100644 --- a/include/asm-s390/page.h +++ b/include/asm-s390/page.h | |||
| @@ -181,8 +181,6 @@ page_get_storage_key(unsigned long addr) | |||
| 181 | #define PAGE_OFFSET 0x0UL | 181 | #define PAGE_OFFSET 0x0UL |
| 182 | #define __pa(x) (unsigned long)(x) | 182 | #define __pa(x) (unsigned long)(x) |
| 183 | #define __va(x) (void *)(unsigned long)(x) | 183 | #define __va(x) (void *)(unsigned long)(x) |
| 184 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 185 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 186 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 184 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 187 | 185 | ||
| 188 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 186 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| @@ -193,6 +191,7 @@ page_get_storage_key(unsigned long addr) | |||
| 193 | 191 | ||
| 194 | #endif /* __KERNEL__ */ | 192 | #endif /* __KERNEL__ */ |
| 195 | 193 | ||
| 194 | #include <asm-generic/memory_model.h> | ||
| 196 | #include <asm-generic/page.h> | 195 | #include <asm-generic/page.h> |
| 197 | 196 | ||
| 198 | #endif /* _S390_PAGE_H */ | 197 | #endif /* _S390_PAGE_H */ |
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h index d0be3e477013..5738ad63537c 100644 --- a/include/asm-s390/types.h +++ b/include/asm-s390/types.h | |||
| @@ -93,6 +93,11 @@ typedef u64 sector_t; | |||
| 93 | #define HAVE_SECTOR_T | 93 | #define HAVE_SECTOR_T |
| 94 | #endif | 94 | #endif |
| 95 | 95 | ||
| 96 | #ifdef CONFIG_LSF | ||
| 97 | typedef u64 blkcnt_t; | ||
| 98 | #define HAVE_BLKCNT_T | ||
| 99 | #endif | ||
| 100 | |||
| 96 | #endif /* ! __s390x__ */ | 101 | #endif /* ! __s390x__ */ |
| 97 | #endif /* __ASSEMBLY__ */ | 102 | #endif /* __ASSEMBLY__ */ |
| 98 | #endif /* __KERNEL__ */ | 103 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h index 1c5260860045..e34f82508568 100644 --- a/include/asm-sh/bitops.h +++ b/include/asm-sh/bitops.h | |||
| @@ -19,16 +19,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
| 19 | local_irq_restore(flags); | 19 | local_irq_restore(flags); |
| 20 | } | 20 | } |
| 21 | 21 | ||
| 22 | static __inline__ void __set_bit(int nr, volatile void * addr) | ||
| 23 | { | ||
| 24 | int mask; | ||
| 25 | volatile unsigned int *a = addr; | ||
| 26 | |||
| 27 | a += nr >> 5; | ||
| 28 | mask = 1 << (nr & 0x1f); | ||
| 29 | *a |= mask; | ||
| 30 | } | ||
| 31 | |||
| 32 | /* | 22 | /* |
| 33 | * clear_bit() doesn't provide any barrier for the compiler. | 23 | * clear_bit() doesn't provide any barrier for the compiler. |
| 34 | */ | 24 | */ |
| @@ -47,16 +37,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr) | |||
| 47 | local_irq_restore(flags); | 37 | local_irq_restore(flags); |
| 48 | } | 38 | } |
| 49 | 39 | ||
| 50 | static __inline__ void __clear_bit(int nr, volatile void * addr) | ||
| 51 | { | ||
| 52 | int mask; | ||
| 53 | volatile unsigned int *a = addr; | ||
| 54 | |||
| 55 | a += nr >> 5; | ||
| 56 | mask = 1 << (nr & 0x1f); | ||
| 57 | *a &= ~mask; | ||
| 58 | } | ||
| 59 | |||
| 60 | static __inline__ void change_bit(int nr, volatile void * addr) | 40 | static __inline__ void change_bit(int nr, volatile void * addr) |
| 61 | { | 41 | { |
| 62 | int mask; | 42 | int mask; |
| @@ -70,16 +50,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) | |||
| 70 | local_irq_restore(flags); | 50 | local_irq_restore(flags); |
| 71 | } | 51 | } |
| 72 | 52 | ||
| 73 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
| 74 | { | ||
| 75 | int mask; | ||
| 76 | volatile unsigned int *a = addr; | ||
| 77 | |||
| 78 | a += nr >> 5; | ||
| 79 | mask = 1 << (nr & 0x1f); | ||
| 80 | *a ^= mask; | ||
| 81 | } | ||
| 82 | |||
| 83 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 53 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
| 84 | { | 54 | { |
| 85 | int mask, retval; | 55 | int mask, retval; |
| @@ -96,19 +66,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
| 96 | return retval; | 66 | return retval; |
| 97 | } | 67 | } |
| 98 | 68 | ||
| 99 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
| 100 | { | ||
| 101 | int mask, retval; | ||
| 102 | volatile unsigned int *a = addr; | ||
| 103 | |||
| 104 | a += nr >> 5; | ||
| 105 | mask = 1 << (nr & 0x1f); | ||
| 106 | retval = (mask & *a) != 0; | ||
| 107 | *a |= mask; | ||
| 108 | |||
| 109 | return retval; | ||
| 110 | } | ||
| 111 | |||
| 112 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 69 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
| 113 | { | 70 | { |
| 114 | int mask, retval; | 71 | int mask, retval; |
| @@ -125,19 +82,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
| 125 | return retval; | 82 | return retval; |
| 126 | } | 83 | } |
| 127 | 84 | ||
| 128 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
| 129 | { | ||
| 130 | int mask, retval; | ||
| 131 | volatile unsigned int *a = addr; | ||
| 132 | |||
| 133 | a += nr >> 5; | ||
| 134 | mask = 1 << (nr & 0x1f); | ||
| 135 | retval = (mask & *a) != 0; | ||
| 136 | *a &= ~mask; | ||
| 137 | |||
| 138 | return retval; | ||
| 139 | } | ||
| 140 | |||
| 141 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 85 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
| 142 | { | 86 | { |
| 143 | int mask, retval; | 87 | int mask, retval; |
| @@ -154,23 +98,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
| 154 | return retval; | 98 | return retval; |
| 155 | } | 99 | } |
| 156 | 100 | ||
| 157 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 101 | #include <asm-generic/bitops/non-atomic.h> |
| 158 | { | ||
| 159 | int mask, retval; | ||
| 160 | volatile unsigned int *a = addr; | ||
| 161 | |||
| 162 | a += nr >> 5; | ||
| 163 | mask = 1 << (nr & 0x1f); | ||
| 164 | retval = (mask & *a) != 0; | ||
| 165 | *a ^= mask; | ||
| 166 | |||
| 167 | return retval; | ||
| 168 | } | ||
| 169 | |||
| 170 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
| 171 | { | ||
| 172 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); | ||
| 173 | } | ||
| 174 | 102 | ||
| 175 | static __inline__ unsigned long ffz(unsigned long word) | 103 | static __inline__ unsigned long ffz(unsigned long word) |
| 176 | { | 104 | { |
| @@ -206,271 +134,15 @@ static __inline__ unsigned long __ffs(unsigned long word) | |||
| 206 | return result; | 134 | return result; |
| 207 | } | 135 | } |
| 208 | 136 | ||
| 209 | /** | 137 | #include <asm-generic/bitops/find.h> |
| 210 | * find_next_bit - find the next set bit in a memory region | 138 | #include <asm-generic/bitops/ffs.h> |
| 211 | * @addr: The address to base the search on | 139 | #include <asm-generic/bitops/hweight.h> |
| 212 | * @offset: The bitnumber to start searching at | 140 | #include <asm-generic/bitops/sched.h> |
| 213 | * @size: The maximum size to search | 141 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 214 | */ | 142 | #include <asm-generic/bitops/ext2-atomic.h> |
| 215 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | 143 | #include <asm-generic/bitops/minix.h> |
| 216 | unsigned long size, unsigned long offset) | 144 | #include <asm-generic/bitops/fls.h> |
| 217 | { | 145 | #include <asm-generic/bitops/fls64.h> |
| 218 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
| 219 | unsigned int result = offset & ~31UL; | ||
| 220 | unsigned int tmp; | ||
| 221 | |||
| 222 | if (offset >= size) | ||
| 223 | return size; | ||
| 224 | size -= result; | ||
| 225 | offset &= 31UL; | ||
| 226 | if (offset) { | ||
| 227 | tmp = *p++; | ||
| 228 | tmp &= ~0UL << offset; | ||
| 229 | if (size < 32) | ||
| 230 | goto found_first; | ||
| 231 | if (tmp) | ||
| 232 | goto found_middle; | ||
| 233 | size -= 32; | ||
| 234 | result += 32; | ||
| 235 | } | ||
| 236 | while (size >= 32) { | ||
| 237 | if ((tmp = *p++) != 0) | ||
| 238 | goto found_middle; | ||
| 239 | result += 32; | ||
| 240 | size -= 32; | ||
| 241 | } | ||
| 242 | if (!size) | ||
| 243 | return result; | ||
| 244 | tmp = *p; | ||
| 245 | |||
| 246 | found_first: | ||
| 247 | tmp &= ~0UL >> (32 - size); | ||
| 248 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 249 | return result + size; /* Nope. */ | ||
| 250 | found_middle: | ||
| 251 | return result + __ffs(tmp); | ||
| 252 | } | ||
| 253 | |||
| 254 | /** | ||
| 255 | * find_first_bit - find the first set bit in a memory region | ||
| 256 | * @addr: The address to start the search at | ||
| 257 | * @size: The maximum size to search | ||
| 258 | * | ||
| 259 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 260 | * containing a bit. | ||
| 261 | */ | ||
| 262 | #define find_first_bit(addr, size) \ | ||
| 263 | find_next_bit((addr), (size), 0) | ||
| 264 | |||
| 265 | static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset) | ||
| 266 | { | ||
| 267 | const unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 268 | unsigned long result = offset & ~31UL; | ||
| 269 | unsigned long tmp; | ||
| 270 | |||
| 271 | if (offset >= size) | ||
| 272 | return size; | ||
| 273 | size -= result; | ||
| 274 | offset &= 31UL; | ||
| 275 | if (offset) { | ||
| 276 | tmp = *(p++); | ||
| 277 | tmp |= ~0UL >> (32-offset); | ||
| 278 | if (size < 32) | ||
| 279 | goto found_first; | ||
| 280 | if (~tmp) | ||
| 281 | goto found_middle; | ||
| 282 | size -= 32; | ||
| 283 | result += 32; | ||
| 284 | } | ||
| 285 | while (size & ~31UL) { | ||
| 286 | if (~(tmp = *(p++))) | ||
| 287 | goto found_middle; | ||
| 288 | result += 32; | ||
| 289 | size -= 32; | ||
| 290 | } | ||
| 291 | if (!size) | ||
| 292 | return result; | ||
| 293 | tmp = *p; | ||
| 294 | |||
| 295 | found_first: | ||
| 296 | tmp |= ~0UL << size; | ||
| 297 | found_middle: | ||
| 298 | return result + ffz(tmp); | ||
| 299 | } | ||
| 300 | |||
| 301 | #define find_first_zero_bit(addr, size) \ | ||
| 302 | find_next_zero_bit((addr), (size), 0) | ||
| 303 | |||
| 304 | /* | ||
| 305 | * ffs: find first bit set. This is defined the same way as | ||
| 306 | * the libc and compiler builtin ffs routines, therefore | ||
| 307 | * differs in spirit from the above ffz (man ffs). | ||
| 308 | */ | ||
| 309 | |||
| 310 | #define ffs(x) generic_ffs(x) | ||
| 311 | |||
| 312 | /* | ||
| 313 | * hweightN: returns the hamming weight (i.e. the number | ||
| 314 | * of bits set) of a N-bit word | ||
| 315 | */ | ||
| 316 | |||
| 317 | #define hweight32(x) generic_hweight32(x) | ||
| 318 | #define hweight16(x) generic_hweight16(x) | ||
| 319 | #define hweight8(x) generic_hweight8(x) | ||
| 320 | |||
| 321 | /* | ||
| 322 | * Every architecture must define this function. It's the fastest | ||
| 323 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 324 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 325 | * bits is cleared. | ||
| 326 | */ | ||
| 327 | |||
| 328 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 329 | { | ||
| 330 | if (unlikely(b[0])) | ||
| 331 | return __ffs(b[0]); | ||
| 332 | if (unlikely(b[1])) | ||
| 333 | return __ffs(b[1]) + 32; | ||
| 334 | if (unlikely(b[2])) | ||
| 335 | return __ffs(b[2]) + 64; | ||
| 336 | if (b[3]) | ||
| 337 | return __ffs(b[3]) + 96; | ||
| 338 | return __ffs(b[4]) + 128; | ||
| 339 | } | ||
| 340 | |||
| 341 | #ifdef __LITTLE_ENDIAN__ | ||
| 342 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) | ||
| 343 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) | ||
| 344 | #define ext2_test_bit(nr, addr) test_bit((nr), (addr)) | ||
| 345 | #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) | ||
| 346 | #define ext2_find_next_zero_bit(addr, size, offset) \ | ||
| 347 | find_next_zero_bit((unsigned long *)(addr), (size), (offset)) | ||
| 348 | #else | ||
| 349 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
| 350 | { | ||
| 351 | int mask, retval; | ||
| 352 | unsigned long flags; | ||
| 353 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 354 | |||
| 355 | ADDR += nr >> 3; | ||
| 356 | mask = 1 << (nr & 0x07); | ||
| 357 | local_irq_save(flags); | ||
| 358 | retval = (mask & *ADDR) != 0; | ||
| 359 | *ADDR |= mask; | ||
| 360 | local_irq_restore(flags); | ||
| 361 | return retval; | ||
| 362 | } | ||
| 363 | |||
| 364 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
| 365 | { | ||
| 366 | int mask, retval; | ||
| 367 | unsigned long flags; | ||
| 368 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 369 | |||
| 370 | ADDR += nr >> 3; | ||
| 371 | mask = 1 << (nr & 0x07); | ||
| 372 | local_irq_save(flags); | ||
| 373 | retval = (mask & *ADDR) != 0; | ||
| 374 | *ADDR &= ~mask; | ||
| 375 | local_irq_restore(flags); | ||
| 376 | return retval; | ||
| 377 | } | ||
| 378 | |||
| 379 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
| 380 | { | ||
| 381 | int mask; | ||
| 382 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
| 383 | |||
| 384 | ADDR += nr >> 3; | ||
| 385 | mask = 1 << (nr & 0x07); | ||
| 386 | return ((mask & *ADDR) != 0); | ||
| 387 | } | ||
| 388 | |||
| 389 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 390 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 391 | |||
| 392 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
| 393 | { | ||
| 394 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 395 | unsigned long result = offset & ~31UL; | ||
| 396 | unsigned long tmp; | ||
| 397 | |||
| 398 | if (offset >= size) | ||
| 399 | return size; | ||
| 400 | size -= result; | ||
| 401 | offset &= 31UL; | ||
| 402 | if(offset) { | ||
| 403 | /* We hold the little endian value in tmp, but then the | ||
| 404 | * shift is illegal. So we could keep a big endian value | ||
| 405 | * in tmp, like this: | ||
| 406 | * | ||
| 407 | * tmp = __swab32(*(p++)); | ||
| 408 | * tmp |= ~0UL >> (32-offset); | ||
| 409 | * | ||
| 410 | * but this would decrease performance, so we change the | ||
| 411 | * shift: | ||
| 412 | */ | ||
| 413 | tmp = *(p++); | ||
| 414 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 415 | if(size < 32) | ||
| 416 | goto found_first; | ||
| 417 | if(~tmp) | ||
| 418 | goto found_middle; | ||
| 419 | size -= 32; | ||
| 420 | result += 32; | ||
| 421 | } | ||
| 422 | while(size & ~31UL) { | ||
| 423 | if(~(tmp = *(p++))) | ||
| 424 | goto found_middle; | ||
| 425 | result += 32; | ||
| 426 | size -= 32; | ||
| 427 | } | ||
| 428 | if(!size) | ||
| 429 | return result; | ||
| 430 | tmp = *p; | ||
| 431 | |||
| 432 | found_first: | ||
| 433 | /* tmp is little endian, so we would have to swab the shift, | ||
| 434 | * see above. But then we have to swab tmp below for ffz, so | ||
| 435 | * we might as well do this here. | ||
| 436 | */ | ||
| 437 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 438 | found_middle: | ||
| 439 | return result + ffz(__swab32(tmp)); | ||
| 440 | } | ||
| 441 | #endif | ||
| 442 | |||
| 443 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 444 | ({ \ | ||
| 445 | int ret; \ | ||
| 446 | spin_lock(lock); \ | ||
| 447 | ret = ext2_set_bit((nr), (addr)); \ | ||
| 448 | spin_unlock(lock); \ | ||
| 449 | ret; \ | ||
| 450 | }) | ||
| 451 | |||
| 452 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 453 | ({ \ | ||
| 454 | int ret; \ | ||
| 455 | spin_lock(lock); \ | ||
| 456 | ret = ext2_clear_bit((nr), (addr)); \ | ||
| 457 | spin_unlock(lock); \ | ||
| 458 | ret; \ | ||
| 459 | }) | ||
| 460 | |||
| 461 | /* Bitmap functions for the minix filesystem. */ | ||
| 462 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 463 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 464 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 465 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 466 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 467 | |||
| 468 | /* | ||
| 469 | * fls: find last bit set. | ||
| 470 | */ | ||
| 471 | |||
| 472 | #define fls(x) generic_fls(x) | ||
| 473 | #define fls64(x) generic_fls64(x) | ||
| 474 | 146 | ||
| 475 | #endif /* __KERNEL__ */ | 147 | #endif /* __KERNEL__ */ |
| 476 | 148 | ||
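Editor's note: the bulk of the deleted sh code is replaced by the asm-generic/bitops includes listed at the end of the hunk. As one example, the hweight header supplies the classic parallel bit count; a sketch of that algorithm (not a verbatim copy of the header):

        /* Sketch: 32-bit population count via masked partial sums. */
        static inline unsigned int hweight32_sketch(unsigned int w)
        {
                unsigned int res = w - ((w >> 1) & 0x55555555);

                res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
                res = (res + (res >> 4)) & 0x0f0f0f0f;
                res = res + (res >> 8);
                return (res + (res >> 16)) & 0xff;
        }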
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h index 972c3f655b2a..9c89287c3e56 100644 --- a/include/asm-sh/page.h +++ b/include/asm-sh/page.h | |||
| @@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 105 | 105 | ||
| 106 | /* PFN start number, because of __MEMORY_START */ | 106 | /* PFN start number, because of __MEMORY_START */ |
| 107 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) | 107 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) |
| 108 | 108 | #define ARCH_PFN_OFFSET (PFN_START) | |
| 109 | #define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START) | ||
| 110 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START) | ||
| 111 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 109 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 112 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) | 110 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) |
| 113 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 111 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| @@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 117 | 115 | ||
| 118 | #endif /* __KERNEL__ */ | 116 | #endif /* __KERNEL__ */ |
| 119 | 117 | ||

| 118 | #include <asm-generic/memory_model.h> | ||
| 120 | #include <asm-generic/page.h> | 119 | #include <asm-generic/page.h> |
| 121 | 120 | ||
| 122 | #endif /* __ASM_SH_PAGE_H */ | 121 | #endif /* __ASM_SH_PAGE_H */ |
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h index 914e3fcbbd37..6c41a60657f1 100644 --- a/include/asm-sh/stat.h +++ b/include/asm-sh/stat.h | |||
| @@ -60,13 +60,7 @@ struct stat64 { | |||
| 60 | long long st_size; | 60 | long long st_size; |
| 61 | unsigned long st_blksize; | 61 | unsigned long st_blksize; |
| 62 | 62 | ||
| 63 | #if defined(__BIG_ENDIAN__) | 63 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
| 64 | unsigned long __pad4; /* Future possible st_blocks hi bits */ | ||
| 65 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
| 66 | #else /* Must be little */ | ||
| 67 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
| 68 | unsigned long __pad4; /* Future possible st_blocks hi bits */ | ||
| 69 | #endif | ||
| 70 | 64 | ||
| 71 | unsigned long st_atime; | 65 | unsigned long st_atime; |
| 72 | unsigned long st_atime_nsec; | 66 | unsigned long st_atime_nsec; |
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index 85f0c11b4319..7345350d98c0 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | struct thread_info { | 18 | struct thread_info { |
| 19 | struct task_struct *task; /* main task structure */ | 19 | struct task_struct *task; /* main task structure */ |
| 20 | struct exec_domain *exec_domain; /* execution domain */ | 20 | struct exec_domain *exec_domain; /* execution domain */ |
| 21 | __u32 flags; /* low level flags */ | 21 | unsigned long flags; /* low level flags */ |
| 22 | __u32 cpu; | 22 | __u32 cpu; |
| 23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
| 24 | struct restart_block restart_block; | 24 | struct restart_block restart_block; |
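Editor's note: a plausible reading of the flags type change is that thread flags are manipulated through the bitops just switched to the generic versions, which operate on unsigned-long-sized words, so keeping the field as __u32 invites width mismatches. Illustrative only (assumed caller, standard flag name):

        /* Illustration: thread flags go straight into the bitops, which
         * expect unsigned long words. */
        static inline void mark_need_resched(struct thread_info *ti)
        {
                set_bit(TIF_NEED_RESCHED, &ti->flags);
        }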
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h index cb7e183a0a6b..488552f43b2a 100644 --- a/include/asm-sh/types.h +++ b/include/asm-sh/types.h | |||
| @@ -58,6 +58,11 @@ typedef u64 sector_t; | |||
| 58 | #define HAVE_SECTOR_T | 58 | #define HAVE_SECTOR_T |
| 59 | #endif | 59 | #endif |
| 60 | 60 | ||
| 61 | #ifdef CONFIG_LSF | ||
| 62 | typedef u64 blkcnt_t; | ||
| 63 | #define HAVE_BLKCNT_T | ||
| 64 | #endif | ||
| 65 | |||
| 61 | #endif /* __ASSEMBLY__ */ | 66 | #endif /* __ASSEMBLY__ */ |
| 62 | 67 | ||
| 63 | #endif /* __KERNEL__ */ | 68 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h index ce9c3ad45fe0..f3bdcdb5d046 100644 --- a/include/asm-sh64/bitops.h +++ b/include/asm-sh64/bitops.h | |||
| @@ -31,16 +31,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
| 31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
| 32 | } | 32 | } |
| 33 | 33 | ||
| 34 | static inline void __set_bit(int nr, void *addr) | ||
| 35 | { | ||
| 36 | int mask; | ||
| 37 | unsigned int *a = addr; | ||
| 38 | |||
| 39 | a += nr >> 5; | ||
| 40 | mask = 1 << (nr & 0x1f); | ||
| 41 | *a |= mask; | ||
| 42 | } | ||
| 43 | |||
| 44 | /* | 34 | /* |
| 45 | * clear_bit() doesn't provide any barrier for the compiler. | 35 | * clear_bit() doesn't provide any barrier for the compiler. |
| 46 | */ | 36 | */ |
| @@ -58,15 +48,6 @@ static inline void clear_bit(int nr, volatile unsigned long *a) | |||
| 58 | local_irq_restore(flags); | 48 | local_irq_restore(flags); |
| 59 | } | 49 | } |
| 60 | 50 | ||
| 61 | static inline void __clear_bit(int nr, volatile unsigned long *a) | ||
| 62 | { | ||
| 63 | int mask; | ||
| 64 | |||
| 65 | a += nr >> 5; | ||
| 66 | mask = 1 << (nr & 0x1f); | ||
| 67 | *a &= ~mask; | ||
| 68 | } | ||
| 69 | |||
| 70 | static __inline__ void change_bit(int nr, volatile void * addr) | 51 | static __inline__ void change_bit(int nr, volatile void * addr) |
| 71 | { | 52 | { |
| 72 | int mask; | 53 | int mask; |
| @@ -80,16 +61,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) | |||
| 80 | local_irq_restore(flags); | 61 | local_irq_restore(flags); |
| 81 | } | 62 | } |
| 82 | 63 | ||
| 83 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
| 84 | { | ||
| 85 | int mask; | ||
| 86 | volatile unsigned int *a = addr; | ||
| 87 | |||
| 88 | a += nr >> 5; | ||
| 89 | mask = 1 << (nr & 0x1f); | ||
| 90 | *a ^= mask; | ||
| 91 | } | ||
| 92 | |||
| 93 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 64 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
| 94 | { | 65 | { |
| 95 | int mask, retval; | 66 | int mask, retval; |
| @@ -106,19 +77,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
| 106 | return retval; | 77 | return retval; |
| 107 | } | 78 | } |
| 108 | 79 | ||
| 109 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
| 110 | { | ||
| 111 | int mask, retval; | ||
| 112 | volatile unsigned int *a = addr; | ||
| 113 | |||
| 114 | a += nr >> 5; | ||
| 115 | mask = 1 << (nr & 0x1f); | ||
| 116 | retval = (mask & *a) != 0; | ||
| 117 | *a |= mask; | ||
| 118 | |||
| 119 | return retval; | ||
| 120 | } | ||
| 121 | |||
| 122 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 80 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
| 123 | { | 81 | { |
| 124 | int mask, retval; | 82 | int mask, retval; |
| @@ -135,19 +93,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
| 135 | return retval; | 93 | return retval; |
| 136 | } | 94 | } |
| 137 | 95 | ||
| 138 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
| 139 | { | ||
| 140 | int mask, retval; | ||
| 141 | volatile unsigned int *a = addr; | ||
| 142 | |||
| 143 | a += nr >> 5; | ||
| 144 | mask = 1 << (nr & 0x1f); | ||
| 145 | retval = (mask & *a) != 0; | ||
| 146 | *a &= ~mask; | ||
| 147 | |||
| 148 | return retval; | ||
| 149 | } | ||
| 150 | |||
| 151 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 96 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
| 152 | { | 97 | { |
| 153 | int mask, retval; | 98 | int mask, retval; |
| @@ -164,23 +109,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
| 164 | return retval; | 109 | return retval; |
| 165 | } | 110 | } |
| 166 | 111 | ||
| 167 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 112 | #include <asm-generic/bitops/non-atomic.h> |
| 168 | { | ||
| 169 | int mask, retval; | ||
| 170 | volatile unsigned int *a = addr; | ||
| 171 | |||
| 172 | a += nr >> 5; | ||
| 173 | mask = 1 << (nr & 0x1f); | ||
| 174 | retval = (mask & *a) != 0; | ||
| 175 | *a ^= mask; | ||
| 176 | |||
| 177 | return retval; | ||
| 178 | } | ||
| 179 | |||
| 180 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
| 181 | { | ||
| 182 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); | ||
| 183 | } | ||
| 184 | 113 | ||
| 185 | static __inline__ unsigned long ffz(unsigned long word) | 114 | static __inline__ unsigned long ffz(unsigned long word) |
| 186 | { | 115 | { |
| @@ -204,313 +133,16 @@ static __inline__ unsigned long ffz(unsigned long word) | |||
| 204 | return result; | 133 | return result; |
| 205 | } | 134 | } |
| 206 | 135 | ||
| 207 | /** | 136 | #include <asm-generic/bitops/__ffs.h> |
| 208 | * __ffs - find first bit in word | 137 | #include <asm-generic/bitops/find.h> |
| 209 | * @word: The word to search | 138 | #include <asm-generic/bitops/hweight.h> |
| 210 | * | 139 | #include <asm-generic/bitops/sched.h> |
| 211 | * Undefined if no bit exists, so code should check against 0 first. | 140 | #include <asm-generic/bitops/ffs.h> |
| 212 | */ | 141 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 213 | static inline unsigned long __ffs(unsigned long word) | 142 | #include <asm-generic/bitops/ext2-atomic.h> |
| 214 | { | 143 | #include <asm-generic/bitops/minix.h> |
| 215 | int r = 0; | 144 | #include <asm-generic/bitops/fls.h> |
| 216 | 145 | #include <asm-generic/bitops/fls64.h> | |
| 217 | if (!word) | ||
| 218 | return 0; | ||
| 219 | if (!(word & 0xffff)) { | ||
| 220 | word >>= 16; | ||
| 221 | r += 16; | ||
| 222 | } | ||
| 223 | if (!(word & 0xff)) { | ||
| 224 | word >>= 8; | ||
| 225 | r += 8; | ||
| 226 | } | ||
| 227 | if (!(word & 0xf)) { | ||
| 228 | word >>= 4; | ||
| 229 | r += 4; | ||
| 230 | } | ||
| 231 | if (!(word & 3)) { | ||
| 232 | word >>= 2; | ||
| 233 | r += 2; | ||
| 234 | } | ||
| 235 | if (!(word & 1)) { | ||
| 236 | word >>= 1; | ||
| 237 | r += 1; | ||
| 238 | } | ||
| 239 | return r; | ||
| 240 | } | ||
| 241 | |||
| 242 | /** | ||
| 243 | * find_next_bit - find the next set bit in a memory region | ||
| 244 | * @addr: The address to base the search on | ||
| 245 | * @offset: The bitnumber to start searching at | ||
| 246 | * @size: The maximum size to search | ||
| 247 | */ | ||
| 248 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
| 249 | unsigned long size, unsigned long offset) | ||
| 250 | { | ||
| 251 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
| 252 | unsigned int result = offset & ~31UL; | ||
| 253 | unsigned int tmp; | ||
| 254 | |||
| 255 | if (offset >= size) | ||
| 256 | return size; | ||
| 257 | size -= result; | ||
| 258 | offset &= 31UL; | ||
| 259 | if (offset) { | ||
| 260 | tmp = *p++; | ||
| 261 | tmp &= ~0UL << offset; | ||
| 262 | if (size < 32) | ||
| 263 | goto found_first; | ||
| 264 | if (tmp) | ||
| 265 | goto found_middle; | ||
| 266 | size -= 32; | ||
| 267 | result += 32; | ||
| 268 | } | ||
| 269 | while (size >= 32) { | ||
| 270 | if ((tmp = *p++) != 0) | ||
| 271 | goto found_middle; | ||
| 272 | result += 32; | ||
| 273 | size -= 32; | ||
| 274 | } | ||
| 275 | if (!size) | ||
| 276 | return result; | ||
| 277 | tmp = *p; | ||
| 278 | |||
| 279 | found_first: | ||
| 280 | tmp &= ~0UL >> (32 - size); | ||
| 281 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 282 | return result + size; /* Nope. */ | ||
| 283 | found_middle: | ||
| 284 | return result + __ffs(tmp); | ||
| 285 | } | ||
| 286 | |||
| 287 | /** | ||
| 288 | * find_first_bit - find the first set bit in a memory region | ||
| 289 | * @addr: The address to start the search at | ||
| 290 | * @size: The maximum size to search | ||
| 291 | * | ||
| 292 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 293 | * containing a bit. | ||
| 294 | */ | ||
| 295 | #define find_first_bit(addr, size) \ | ||
| 296 | find_next_bit((addr), (size), 0) | ||
| 297 | |||
| 298 | |||
| 299 | static inline int find_next_zero_bit(void *addr, int size, int offset) | ||
| 300 | { | ||
| 301 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 302 | unsigned long result = offset & ~31UL; | ||
| 303 | unsigned long tmp; | ||
| 304 | |||
| 305 | if (offset >= size) | ||
| 306 | return size; | ||
| 307 | size -= result; | ||
| 308 | offset &= 31UL; | ||
| 309 | if (offset) { | ||
| 310 | tmp = *(p++); | ||
| 311 | tmp |= ~0UL >> (32-offset); | ||
| 312 | if (size < 32) | ||
| 313 | goto found_first; | ||
| 314 | if (~tmp) | ||
| 315 | goto found_middle; | ||
| 316 | size -= 32; | ||
| 317 | result += 32; | ||
| 318 | } | ||
| 319 | while (size & ~31UL) { | ||
| 320 | if (~(tmp = *(p++))) | ||
| 321 | goto found_middle; | ||
| 322 | result += 32; | ||
| 323 | size -= 32; | ||
| 324 | } | ||
| 325 | if (!size) | ||
| 326 | return result; | ||
| 327 | tmp = *p; | ||
| 328 | |||
| 329 | found_first: | ||
| 330 | tmp |= ~0UL << size; | ||
| 331 | found_middle: | ||
| 332 | return result + ffz(tmp); | ||
| 333 | } | ||
| 334 | |||
| 335 | #define find_first_zero_bit(addr, size) \ | ||
| 336 | find_next_zero_bit((addr), (size), 0) | ||
| 337 | |||
| 338 | /* | ||
| 339 | * hweightN: returns the hamming weight (i.e. the number | ||
| 340 | * of bits set) of a N-bit word | ||
| 341 | */ | ||
| 342 | |||
| 343 | #define hweight32(x) generic_hweight32(x) | ||
| 344 | #define hweight16(x) generic_hweight16(x) | ||
| 345 | #define hweight8(x) generic_hweight8(x) | ||
| 346 | |||
| 347 | /* | ||
| 348 | * Every architecture must define this function. It's the fastest | ||
| 349 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 350 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 351 | * bits is cleared. | ||
| 352 | */ | ||
| 353 | |||
| 354 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 355 | { | ||
| 356 | if (unlikely(b[0])) | ||
| 357 | return __ffs(b[0]); | ||
| 358 | if (unlikely(b[1])) | ||
| 359 | return __ffs(b[1]) + 32; | ||
| 360 | if (unlikely(b[2])) | ||
| 361 | return __ffs(b[2]) + 64; | ||
| 362 | if (b[3]) | ||
| 363 | return __ffs(b[3]) + 96; | ||
| 364 | return __ffs(b[4]) + 128; | ||
| 365 | } | ||
| 366 | |||
| 367 | /* | ||
| 368 | * ffs: find first bit set. This is defined the same way as | ||
| 369 | * the libc and compiler builtin ffs routines, therefore | ||
| 370 | * differs in spirit from the above ffz (man ffs). | ||
| 371 | */ | ||
| 372 | |||
| 373 | #define ffs(x) generic_ffs(x) | ||
| 374 | |||
| 375 | /* | ||
| 376 | * hweightN: returns the hamming weight (i.e. the number | ||
| 377 | * of bits set) of a N-bit word | ||
| 378 | */ | ||
| 379 | |||
| 380 | #define hweight32(x) generic_hweight32(x) | ||
| 381 | #define hweight16(x) generic_hweight16(x) | ||
| 382 | #define hweight8(x) generic_hweight8(x) | ||
| 383 | |||
| 384 | #ifdef __LITTLE_ENDIAN__ | ||
| 385 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) | ||
| 386 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) | ||
| 387 | #define ext2_test_bit(nr, addr) test_bit((nr), (addr)) | ||
| 388 | #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) | ||
| 389 | #define ext2_find_next_zero_bit(addr, size, offset) \ | ||
| 390 | find_next_zero_bit((addr), (size), (offset)) | ||
| 391 | #else | ||
| 392 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
| 393 | { | ||
| 394 | int mask, retval; | ||
| 395 | unsigned long flags; | ||
| 396 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 397 | |||
| 398 | ADDR += nr >> 3; | ||
| 399 | mask = 1 << (nr & 0x07); | ||
| 400 | local_irq_save(flags); | ||
| 401 | retval = (mask & *ADDR) != 0; | ||
| 402 | *ADDR |= mask; | ||
| 403 | local_irq_restore(flags); | ||
| 404 | return retval; | ||
| 405 | } | ||
| 406 | |||
| 407 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
| 408 | { | ||
| 409 | int mask, retval; | ||
| 410 | unsigned long flags; | ||
| 411 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
| 412 | |||
| 413 | ADDR += nr >> 3; | ||
| 414 | mask = 1 << (nr & 0x07); | ||
| 415 | local_irq_save(flags); | ||
| 416 | retval = (mask & *ADDR) != 0; | ||
| 417 | *ADDR &= ~mask; | ||
| 418 | local_irq_restore(flags); | ||
| 419 | return retval; | ||
| 420 | } | ||
| 421 | |||
| 422 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
| 423 | { | ||
| 424 | int mask; | ||
| 425 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
| 426 | |||
| 427 | ADDR += nr >> 3; | ||
| 428 | mask = 1 << (nr & 0x07); | ||
| 429 | return ((mask & *ADDR) != 0); | ||
| 430 | } | ||
| 431 | |||
| 432 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 433 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 434 | |||
| 435 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
| 436 | { | ||
| 437 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 438 | unsigned long result = offset & ~31UL; | ||
| 439 | unsigned long tmp; | ||
| 440 | |||
| 441 | if (offset >= size) | ||
| 442 | return size; | ||
| 443 | size -= result; | ||
| 444 | offset &= 31UL; | ||
| 445 | if(offset) { | ||
| 446 | /* We hold the little endian value in tmp, but then the | ||
| 447 | * shift is illegal. So we could keep a big endian value | ||
| 448 | * in tmp, like this: | ||
| 449 | * | ||
| 450 | * tmp = __swab32(*(p++)); | ||
| 451 | * tmp |= ~0UL >> (32-offset); | ||
| 452 | * | ||
| 453 | * but this would decrease performance, so we change the | ||
| 454 | * shift: | ||
| 455 | */ | ||
| 456 | tmp = *(p++); | ||
| 457 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 458 | if(size < 32) | ||
| 459 | goto found_first; | ||
| 460 | if(~tmp) | ||
| 461 | goto found_middle; | ||
| 462 | size -= 32; | ||
| 463 | result += 32; | ||
| 464 | } | ||
| 465 | while(size & ~31UL) { | ||
| 466 | if(~(tmp = *(p++))) | ||
| 467 | goto found_middle; | ||
| 468 | result += 32; | ||
| 469 | size -= 32; | ||
| 470 | } | ||
| 471 | if(!size) | ||
| 472 | return result; | ||
| 473 | tmp = *p; | ||
| 474 | |||
| 475 | found_first: | ||
| 476 | /* tmp is little endian, so we would have to swab the shift, | ||
| 477 | * see above. But then we have to swab tmp below for ffz, so | ||
| 478 | * we might as well do this here. | ||
| 479 | */ | ||
| 480 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 481 | found_middle: | ||
| 482 | return result + ffz(__swab32(tmp)); | ||
| 483 | } | ||
| 484 | #endif | ||
| 485 | |||
| 486 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 487 | ({ \ | ||
| 488 | int ret; \ | ||
| 489 | spin_lock(lock); \ | ||
| 490 | ret = ext2_set_bit((nr), (addr)); \ | ||
| 491 | spin_unlock(lock); \ | ||
| 492 | ret; \ | ||
| 493 | }) | ||
| 494 | |||
| 495 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 496 | ({ \ | ||
| 497 | int ret; \ | ||
| 498 | spin_lock(lock); \ | ||
| 499 | ret = ext2_clear_bit((nr), (addr)); \ | ||
| 500 | spin_unlock(lock); \ | ||
| 501 | ret; \ | ||
| 502 | }) | ||
| 503 | |||
| 504 | /* Bitmap functions for the minix filesystem. */ | ||
| 505 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 506 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 507 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 508 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 509 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 510 | |||
| 511 | #define ffs(x) generic_ffs(x) | ||
| 512 | #define fls(x) generic_fls(x) | ||
| 513 | #define fls64(x) generic_fls64(x) | ||
| 514 | 146 | ||
| 515 | #endif /* __KERNEL__ */ | 147 | #endif /* __KERNEL__ */ |
| 516 | 148 | ||
diff --git a/include/asm-sh64/page.h b/include/asm-sh64/page.h index c86df90f7cbd..e4937cdabebd 100644 --- a/include/asm-sh64/page.h +++ b/include/asm-sh64/page.h | |||
| @@ -105,9 +105,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 105 | 105 | ||
| 106 | /* PFN start number, because of __MEMORY_START */ | 106 | /* PFN start number, because of __MEMORY_START */ |
| 107 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) | 107 | #define PFN_START (__MEMORY_START >> PAGE_SHIFT) |
| 108 | 108 | #define ARCH_PFN_OFFSET (PFN_START) | |
| 109 | #define pfn_to_page(pfn) (mem_map + (pfn) - PFN_START) | ||
| 110 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map) + PFN_START) | ||
| 111 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 109 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
| 112 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) | 110 | #define pfn_valid(pfn) (((pfn) - PFN_START) < max_mapnr) |
| 113 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) | 111 | #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) |
| @@ -117,6 +115,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 117 | 115 | ||
| 118 | #endif /* __KERNEL__ */ | 116 | #endif /* __KERNEL__ */ |
| 119 | 117 | ||
| 118 | #include <asm-generic/memory_model.h> | ||
| 120 | #include <asm-generic/page.h> | 119 | #include <asm-generic/page.h> |
| 121 | 120 | ||
| 122 | #endif /* __ASM_SH64_PAGE_H */ | 121 | #endif /* __ASM_SH64_PAGE_H */ |
diff --git a/include/asm-sh64/platform.h b/include/asm-sh64/platform.h index 7046a9014027..bd0d9c405a80 100644 --- a/include/asm-sh64/platform.h +++ b/include/asm-sh64/platform.h | |||
| @@ -61,9 +61,4 @@ extern int platform_int_priority[NR_INTC_IRQS]; | |||
| 61 | #define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2]) | 61 | #define code_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 2]) |
| 62 | #define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1]) | 62 | #define data_resource (platform_parms.kram_res_p[STANDARD_KRAM_RESOURCES - 1]) |
| 63 | 63 | ||
| 64 | /* Be prepared for 64-bit sign extensions */ | ||
| 65 | #define PFN_UP(x) ((((x) + PAGE_SIZE-1) >> PAGE_SHIFT) & 0x000fffff) | ||
| 66 | #define PFN_DOWN(x) (((x) >> PAGE_SHIFT) & 0x000fffff) | ||
| 67 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
| 68 | |||
| 69 | #endif /* __ASM_SH64_PLATFORM_H */ | 64 | #endif /* __ASM_SH64_PLATFORM_H */ |
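Editor's note: the private PFN_UP/PFN_DOWN/PFN_PHYS copies (with their 20-bit masking against sign extension) give way to the shared helpers; the generic definitions in <linux/pfn.h> look essentially like this (paraphrased sketch):

        /* Sketch of the generic page-frame-number helpers. */
        #define PFN_ALIGN(x)    (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
        #define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
        #define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
        #define PFN_PHYS(x)     ((x) << PAGE_SHIFT)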
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h index 41722b5e45ef..04aa3318f76a 100644 --- a/include/asm-sparc/bitops.h +++ b/include/asm-sparc/bitops.h | |||
| @@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
| 152 | : "memory", "cc"); | 152 | : "memory", "cc"); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | /* | 155 | #include <asm-generic/bitops/non-atomic.h> |
| 156 | * non-atomic versions | ||
| 157 | */ | ||
| 158 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
| 159 | { | ||
| 160 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 161 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 162 | |||
| 163 | *p |= mask; | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
| 167 | { | ||
| 168 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 169 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 170 | |||
| 171 | *p &= ~mask; | ||
| 172 | } | ||
| 173 | |||
| 174 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
| 175 | { | ||
| 176 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 177 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 178 | |||
| 179 | *p ^= mask; | ||
| 180 | } | ||
| 181 | |||
| 182 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
| 183 | { | ||
| 184 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 185 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 186 | unsigned long old = *p; | ||
| 187 | |||
| 188 | *p = old | mask; | ||
| 189 | return (old & mask) != 0; | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
| 193 | { | ||
| 194 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 195 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 196 | unsigned long old = *p; | ||
| 197 | |||
| 198 | *p = old & ~mask; | ||
| 199 | return (old & mask) != 0; | ||
| 200 | } | ||
| 201 | |||
| 202 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
| 203 | { | ||
| 204 | unsigned long mask = 1UL << (nr & 0x1f); | ||
| 205 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
| 206 | unsigned long old = *p; | ||
| 207 | |||
| 208 | *p = old ^ mask; | ||
| 209 | return (old & mask) != 0; | ||
| 210 | } | ||
| 211 | 156 | ||
| 212 | #define smp_mb__before_clear_bit() do { } while(0) | 157 | #define smp_mb__before_clear_bit() do { } while(0) |
| 213 | #define smp_mb__after_clear_bit() do { } while(0) | 158 | #define smp_mb__after_clear_bit() do { } while(0) |
| 214 | 159 | ||
| 215 | /* The following routine need not be atomic. */ | 160 | #include <asm-generic/bitops/ffz.h> |
| 216 | static inline int test_bit(int nr, __const__ volatile unsigned long *addr) | 161 | #include <asm-generic/bitops/__ffs.h> |
| 217 | { | 162 | #include <asm-generic/bitops/sched.h> |
| 218 | return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL; | 163 | #include <asm-generic/bitops/ffs.h> |
| 219 | } | 164 | #include <asm-generic/bitops/fls.h> |
| 220 | 165 | #include <asm-generic/bitops/fls64.h> | |
| 221 | /* The easy/cheese version for now. */ | 166 | #include <asm-generic/bitops/hweight.h> |
| 222 | static inline unsigned long ffz(unsigned long word) | 167 | #include <asm-generic/bitops/find.h> |
| 223 | { | 168 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 224 | unsigned long result = 0; | 169 | #include <asm-generic/bitops/ext2-atomic.h> |
| 225 | 170 | #include <asm-generic/bitops/minix.h> | |
| 226 | while(word & 1) { | ||
| 227 | result++; | ||
| 228 | word >>= 1; | ||
| 229 | } | ||
| 230 | return result; | ||
| 231 | } | ||
| 232 | |||
| 233 | /** | ||
| 234 | * __ffs - find first bit in word. | ||
| 235 | * @word: The word to search | ||
| 236 | * | ||
| 237 | * Undefined if no bit exists, so code should check against 0 first. | ||
| 238 | */ | ||
| 239 | static inline int __ffs(unsigned long word) | ||
| 240 | { | ||
| 241 | int num = 0; | ||
| 242 | |||
| 243 | if ((word & 0xffff) == 0) { | ||
| 244 | num += 16; | ||
| 245 | word >>= 16; | ||
| 246 | } | ||
| 247 | if ((word & 0xff) == 0) { | ||
| 248 | num += 8; | ||
| 249 | word >>= 8; | ||
| 250 | } | ||
| 251 | if ((word & 0xf) == 0) { | ||
| 252 | num += 4; | ||
| 253 | word >>= 4; | ||
| 254 | } | ||
| 255 | if ((word & 0x3) == 0) { | ||
| 256 | num += 2; | ||
| 257 | word >>= 2; | ||
| 258 | } | ||
| 259 | if ((word & 0x1) == 0) | ||
| 260 | num += 1; | ||
| 261 | return num; | ||
| 262 | } | ||
| 263 | |||
| 264 | /* | ||
| 265 | * Every architecture must define this function. It's the fastest | ||
| 266 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 267 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 268 | * bits is cleared. | ||
| 269 | */ | ||
| 270 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 271 | { | ||
| 272 | |||
| 273 | if (unlikely(b[0])) | ||
| 274 | return __ffs(b[0]); | ||
| 275 | if (unlikely(b[1])) | ||
| 276 | return __ffs(b[1]) + 32; | ||
| 277 | if (unlikely(b[2])) | ||
| 278 | return __ffs(b[2]) + 64; | ||
| 279 | if (b[3]) | ||
| 280 | return __ffs(b[3]) + 96; | ||
| 281 | return __ffs(b[4]) + 128; | ||
| 282 | } | ||
| 283 | |||
| 284 | /* | ||
| 285 | * ffs: find first bit set. This is defined the same way as | ||
| 286 | * the libc and compiler builtin ffs routines, therefore | ||
| 287 | * differs in spirit from the above ffz (man ffs). | ||
| 288 | */ | ||
| 289 | static inline int ffs(int x) | ||
| 290 | { | ||
| 291 | if (!x) | ||
| 292 | return 0; | ||
| 293 | return __ffs((unsigned long)x) + 1; | ||
| 294 | } | ||
| 295 | |||
| 296 | /* | ||
| 297 | * fls: find last (most-significant) bit set. | ||
| 298 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
| 299 | */ | ||
| 300 | #define fls(x) generic_fls(x) | ||
| 301 | #define fls64(x) generic_fls64(x) | ||
| 302 | |||
| 303 | /* | ||
| 304 | * hweightN: returns the hamming weight (i.e. the number | ||
| 305 | * of bits set) of a N-bit word | ||
| 306 | */ | ||
| 307 | #define hweight32(x) generic_hweight32(x) | ||
| 308 | #define hweight16(x) generic_hweight16(x) | ||
| 309 | #define hweight8(x) generic_hweight8(x) | ||
| 310 | |||
| 311 | /* | ||
| 312 | * find_next_zero_bit() finds the first zero bit in a bit string of length | ||
| 313 | * 'size' bits, starting the search at bit 'offset'. This is largely based | ||
| 314 | * on Linus's ALPHA routines, which are pretty portable BTW. | ||
| 315 | */ | ||
| 316 | static inline unsigned long find_next_zero_bit(const unsigned long *addr, | ||
| 317 | unsigned long size, unsigned long offset) | ||
| 318 | { | ||
| 319 | const unsigned long *p = addr + (offset >> 5); | ||
| 320 | unsigned long result = offset & ~31UL; | ||
| 321 | unsigned long tmp; | ||
| 322 | |||
| 323 | if (offset >= size) | ||
| 324 | return size; | ||
| 325 | size -= result; | ||
| 326 | offset &= 31UL; | ||
| 327 | if (offset) { | ||
| 328 | tmp = *(p++); | ||
| 329 | tmp |= ~0UL >> (32-offset); | ||
| 330 | if (size < 32) | ||
| 331 | goto found_first; | ||
| 332 | if (~tmp) | ||
| 333 | goto found_middle; | ||
| 334 | size -= 32; | ||
| 335 | result += 32; | ||
| 336 | } | ||
| 337 | while (size & ~31UL) { | ||
| 338 | if (~(tmp = *(p++))) | ||
| 339 | goto found_middle; | ||
| 340 | result += 32; | ||
| 341 | size -= 32; | ||
| 342 | } | ||
| 343 | if (!size) | ||
| 344 | return result; | ||
| 345 | tmp = *p; | ||
| 346 | |||
| 347 | found_first: | ||
| 348 | tmp |= ~0UL << size; | ||
| 349 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 350 | return result + size; /* Nope. */ | ||
| 351 | found_middle: | ||
| 352 | return result + ffz(tmp); | ||
| 353 | } | ||
| 354 | |||
| 355 | /* | ||
| 356 | * Linus sez that gcc can optimize the following correctly, we'll see if this | ||
| 357 | * holds on the Sparc as it does for the ALPHA. | ||
| 358 | */ | ||
| 359 | #define find_first_zero_bit(addr, size) \ | ||
| 360 | find_next_zero_bit((addr), (size), 0) | ||
| 361 | |||
| 362 | /** | ||
| 363 | * find_next_bit - find the first set bit in a memory region | ||
| 364 | * @addr: The address to base the search on | ||
| 365 | * @offset: The bitnumber to start searching at | ||
| 366 | * @size: The maximum size to search | ||
| 367 | * | ||
| 368 | * Scheduler induced bitop, do not use. | ||
| 369 | */ | ||
| 370 | static inline int find_next_bit(const unsigned long *addr, int size, int offset) | ||
| 371 | { | ||
| 372 | const unsigned long *p = addr + (offset >> 5); | ||
| 373 | int num = offset & ~0x1f; | ||
| 374 | unsigned long word; | ||
| 375 | |||
| 376 | word = *p++; | ||
| 377 | word &= ~((1 << (offset & 0x1f)) - 1); | ||
| 378 | while (num < size) { | ||
| 379 | if (word != 0) { | ||
| 380 | return __ffs(word) + num; | ||
| 381 | } | ||
| 382 | word = *p++; | ||
| 383 | num += 0x20; | ||
| 384 | } | ||
| 385 | return num; | ||
| 386 | } | ||
| 387 | |||
| 388 | /** | ||
| 389 | * find_first_bit - find the first set bit in a memory region | ||
| 390 | * @addr: The address to start the search at | ||
| 391 | * @size: The maximum size to search | ||
| 392 | * | ||
| 393 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 394 | * containing a bit. | ||
| 395 | */ | ||
| 396 | #define find_first_bit(addr, size) \ | ||
| 397 | find_next_bit((addr), (size), 0) | ||
| 398 | |||
| 399 | /* Little-endian bit test, used by the ext2 helpers below. | ||
| 400 | */ | ||
| 401 | static inline int test_le_bit(int nr, __const__ unsigned long * addr) | ||
| 402 | { | ||
| 403 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
| 404 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
| 405 | } | ||
| 406 | |||
| 407 | /* | ||
| 408 | * non-atomic versions | ||
| 409 | */ | ||
| 410 | static inline void __set_le_bit(int nr, unsigned long *addr) | ||
| 411 | { | ||
| 412 | unsigned char *ADDR = (unsigned char *)addr; | ||
| 413 | |||
| 414 | ADDR += nr >> 3; | ||
| 415 | *ADDR |= 1 << (nr & 0x07); | ||
| 416 | } | ||
| 417 | |||
| 418 | static inline void __clear_le_bit(int nr, unsigned long *addr) | ||
| 419 | { | ||
| 420 | unsigned char *ADDR = (unsigned char *)addr; | ||
| 421 | |||
| 422 | ADDR += nr >> 3; | ||
| 423 | *ADDR &= ~(1 << (nr & 0x07)); | ||
| 424 | } | ||
| 425 | |||
| 426 | static inline int __test_and_set_le_bit(int nr, unsigned long *addr) | ||
| 427 | { | ||
| 428 | int mask, retval; | ||
| 429 | unsigned char *ADDR = (unsigned char *)addr; | ||
| 430 | |||
| 431 | ADDR += nr >> 3; | ||
| 432 | mask = 1 << (nr & 0x07); | ||
| 433 | retval = (mask & *ADDR) != 0; | ||
| 434 | *ADDR |= mask; | ||
| 435 | return retval; | ||
| 436 | } | ||
| 437 | |||
| 438 | static inline int __test_and_clear_le_bit(int nr, unsigned long *addr) | ||
| 439 | { | ||
| 440 | int mask, retval; | ||
| 441 | unsigned char *ADDR = (unsigned char *)addr; | ||
| 442 | |||
| 443 | ADDR += nr >> 3; | ||
| 444 | mask = 1 << (nr & 0x07); | ||
| 445 | retval = (mask & *ADDR) != 0; | ||
| 446 | *ADDR &= ~mask; | ||
| 447 | return retval; | ||
| 448 | } | ||
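The little-endian helpers above address the bitmap one byte at a time, so bit nr always lands in byte nr >> 3 at position nr & 7 regardless of host byte order. A small illustrative sketch (buffer and bit index assumed):

static inline int example_le_bit_layout(void)
{
	unsigned long buf[1] = { 0 };

	/* Bit 10 -> byte 1 (10 >> 3), mask 1 << 2 (10 & 7); on either
	 * byte order the second byte of the buffer becomes 0x04. */
	__set_le_bit(10, buf);
	return test_le_bit(10, buf);	/* 1 */
}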
| 449 | |||
| 450 | static inline unsigned long find_next_zero_le_bit(const unsigned long *addr, | ||
| 451 | unsigned long size, unsigned long offset) | ||
| 452 | { | ||
| 453 | const unsigned long *p = addr + (offset >> 5); | ||
| 454 | unsigned long result = offset & ~31UL; | ||
| 455 | unsigned long tmp; | ||
| 456 | |||
| 457 | if (offset >= size) | ||
| 458 | return size; | ||
| 459 | size -= result; | ||
| 460 | offset &= 31UL; | ||
| 461 | if(offset) { | ||
| 462 | tmp = *(p++); | ||
| 463 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 464 | if(size < 32) | ||
| 465 | goto found_first; | ||
| 466 | if(~tmp) | ||
| 467 | goto found_middle; | ||
| 468 | size -= 32; | ||
| 469 | result += 32; | ||
| 470 | } | ||
| 471 | while(size & ~31UL) { | ||
| 472 | if(~(tmp = *(p++))) | ||
| 473 | goto found_middle; | ||
| 474 | result += 32; | ||
| 475 | size -= 32; | ||
| 476 | } | ||
| 477 | if(!size) | ||
| 478 | return result; | ||
| 479 | tmp = *p; | ||
| 480 | |||
| 481 | found_first: | ||
| 482 | tmp = __swab32(tmp) | (~0UL << size); | ||
| 483 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
| 484 | return result + size; /* Nope. */ | ||
| 485 | return result + ffz(tmp); | ||
| 486 | |||
| 487 | found_middle: | ||
| 488 | return result + ffz(__swab32(tmp)); | ||
| 489 | } | ||
| 490 | |||
| 491 | #define find_first_zero_le_bit(addr, size) \ | ||
| 492 | find_next_zero_le_bit((addr), (size), 0) | ||
| 493 | |||
| 494 | #define ext2_set_bit(nr,addr) \ | ||
| 495 | __test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
| 496 | #define ext2_clear_bit(nr,addr) \ | ||
| 497 | __test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
| 498 | |||
| 499 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
| 500 | ({ \ | ||
| 501 | int ret; \ | ||
| 502 | spin_lock(lock); \ | ||
| 503 | ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ | ||
| 504 | spin_unlock(lock); \ | ||
| 505 | ret; \ | ||
| 506 | }) | ||
| 507 | |||
| 508 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
| 509 | ({ \ | ||
| 510 | int ret; \ | ||
| 511 | spin_lock(lock); \ | ||
| 512 | ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ | ||
| 513 | spin_unlock(lock); \ | ||
| 514 | ret; \ | ||
| 515 | }) | ||
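The atomic ext2 variants simply wrap the non-atomic little-endian helpers in the caller-supplied spinlock; written out as a function, the pattern is roughly the following sketch (name assumed, not part of the header):

static inline int example_ext2_set_bit_locked(spinlock_t *lock, int nr,
					      unsigned long *addr)
{
	int ret;

	spin_lock(lock);
	ret = __test_and_set_le_bit(nr, addr);
	spin_unlock(lock);
	return ret;
}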
| 516 | |||
| 517 | #define ext2_test_bit(nr,addr) \ | ||
| 518 | test_le_bit((nr),(unsigned long *)(addr)) | ||
| 519 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 520 | find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
| 521 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 522 | find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
| 523 | |||
| 524 | /* Bitmap functions for the minix filesystem. */ | ||
| 525 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 526 | test_and_set_bit((nr),(unsigned long *)(addr)) | ||
| 527 | #define minix_set_bit(nr,addr) \ | ||
| 528 | set_bit((nr),(unsigned long *)(addr)) | ||
| 529 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 530 | test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
| 531 | #define minix_test_bit(nr,addr) \ | ||
| 532 | test_bit((nr),(unsigned long *)(addr)) | ||
| 533 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 534 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
| 535 | 171 | ||
| 536 | #endif /* __KERNEL__ */ | 172 | #endif /* __KERNEL__ */ |
| 537 | 173 | ||
diff --git a/include/asm-sparc/page.h b/include/asm-sparc/page.h index 9122684f6c1e..ec3274b7ddf4 100644 --- a/include/asm-sparc/page.h +++ b/include/asm-sparc/page.h | |||
| @@ -152,8 +152,7 @@ extern unsigned long pfn_base; | |||
| 152 | #define virt_to_phys __pa | 152 | #define virt_to_phys __pa |
| 153 | #define phys_to_virt __va | 153 | #define phys_to_virt __va |
| 154 | 154 | ||
| 155 | #define pfn_to_page(pfn) (mem_map + ((pfn)-(pfn_base))) | 155 | #define ARCH_PFN_OFFSET (pfn_base) |
| 156 | #define page_to_pfn(page) ((unsigned long)(((page) - mem_map) + pfn_base)) | ||
| 157 | #define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) | 156 | #define virt_to_page(kaddr) (mem_map + ((((unsigned long)(kaddr)-PAGE_OFFSET)>>PAGE_SHIFT))) |
| 158 | 157 | ||
| 159 | #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) | 158 | #define pfn_valid(pfn) (((pfn) >= (pfn_base)) && (((pfn)-(pfn_base)) < max_mapnr)) |
| @@ -164,6 +163,7 @@ extern unsigned long pfn_base; | |||
| 164 | 163 | ||
| 165 | #endif /* __KERNEL__ */ | 164 | #endif /* __KERNEL__ */ |
| 166 | 165 | ||
| 166 | #include <asm-generic/memory_model.h> | ||
| 167 | #include <asm-generic/page.h> | 167 | #include <asm-generic/page.h> |
| 168 | 168 | ||
| 169 | #endif /* _SPARC_PAGE_H */ | 169 | #endif /* _SPARC_PAGE_H */ |
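Defining ARCH_PFN_OFFSET lets the port drop its open-coded pfn_to_page/page_to_pfn and take them from asm-generic/memory_model.h instead; in the FLATMEM case the generic definitions amount to roughly the following (paraphrased, not a verbatim copy of that header):

#define __pfn_to_page(pfn)	(mem_map + ((pfn) - ARCH_PFN_OFFSET))
#define __page_to_pfn(page)	((unsigned long)((page) - mem_map) + \
				 ARCH_PFN_OFFSET)

With ARCH_PFN_OFFSET set to pfn_base this reduces to the two sparc macros removed above.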
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 6efc0162fb09..71944b0f09de 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h | |||
| @@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr); | |||
| 18 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); | 18 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); |
| 19 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); | 19 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); |
| 20 | 20 | ||
| 21 | /* "non-atomic" versions... */ | 21 | #include <asm-generic/bitops/non-atomic.h> |
| 22 | |||
| 23 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
| 24 | { | ||
| 25 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 26 | |||
| 27 | *m |= (1UL << (nr & 63)); | ||
| 28 | } | ||
| 29 | |||
| 30 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
| 31 | { | ||
| 32 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 33 | |||
| 34 | *m &= ~(1UL << (nr & 63)); | ||
| 35 | } | ||
| 36 | |||
| 37 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
| 38 | { | ||
| 39 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 40 | |||
| 41 | *m ^= (1UL << (nr & 63)); | ||
| 42 | } | ||
| 43 | |||
| 44 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
| 45 | { | ||
| 46 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 47 | unsigned long old = *m; | ||
| 48 | unsigned long mask = (1UL << (nr & 63)); | ||
| 49 | |||
| 50 | *m = (old | mask); | ||
| 51 | return ((old & mask) != 0); | ||
| 52 | } | ||
| 53 | |||
| 54 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
| 55 | { | ||
| 56 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 57 | unsigned long old = *m; | ||
| 58 | unsigned long mask = (1UL << (nr & 63)); | ||
| 59 | |||
| 60 | *m = (old & ~mask); | ||
| 61 | return ((old & mask) != 0); | ||
| 62 | } | ||
| 63 | |||
| 64 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
| 65 | { | ||
| 66 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
| 67 | unsigned long old = *m; | ||
| 68 | unsigned long mask = (1UL << (nr & 63)); | ||
| 69 | |||
| 70 | *m = (old ^ mask); | ||
| 71 | return ((old & mask) != 0); | ||
| 72 | } | ||
| 73 | 22 | ||
| 74 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
| 75 | #define smp_mb__before_clear_bit() membar_storeload_loadload() | 24 | #define smp_mb__before_clear_bit() membar_storeload_loadload() |
| @@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | |||
| 79 | #define smp_mb__after_clear_bit() barrier() | 28 | #define smp_mb__after_clear_bit() barrier() |
| 80 | #endif | 29 | #endif |
| 81 | 30 | ||
| 82 | static inline int test_bit(int nr, __const__ volatile unsigned long *addr) | 31 | #include <asm-generic/bitops/ffz.h> |
| 83 | { | 32 | #include <asm-generic/bitops/__ffs.h> |
| 84 | return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL; | 33 | #include <asm-generic/bitops/fls.h> |
| 85 | } | 34 | #include <asm-generic/bitops/fls64.h> |
| 86 | |||
| 87 | /* The easy/cheese version for now. */ | ||
| 88 | static inline unsigned long ffz(unsigned long word) | ||
| 89 | { | ||
| 90 | unsigned long result; | ||
| 91 | |||
| 92 | result = 0; | ||
| 93 | while(word & 1) { | ||
| 94 | result++; | ||
| 95 | word >>= 1; | ||
| 96 | } | ||
| 97 | return result; | ||
| 98 | } | ||
| 99 | |||
| 100 | /** | ||
| 101 | * __ffs - find first bit in word. | ||
| 102 | * @word: The word to search | ||
| 103 | * | ||
| 104 | * Undefined if no bit exists, so code should check against 0 first. | ||
| 105 | */ | ||
| 106 | static inline unsigned long __ffs(unsigned long word) | ||
| 107 | { | ||
| 108 | unsigned long result = 0; | ||
| 109 | |||
| 110 | while (!(word & 1UL)) { | ||
| 111 | result++; | ||
| 112 | word >>= 1; | ||
| 113 | } | ||
| 114 | return result; | ||
| 115 | } | ||
| 116 | |||
| 117 | /* | ||
| 118 | * fls: find last bit set. | ||
| 119 | */ | ||
| 120 | |||
| 121 | #define fls(x) generic_fls(x) | ||
| 122 | #define fls64(x) generic_fls64(x) | ||
| 123 | 35 | ||
| 124 | #ifdef __KERNEL__ | 36 | #ifdef __KERNEL__ |
| 125 | 37 | ||
| 126 | /* | 38 | #include <asm-generic/bitops/sched.h> |
| 127 | * Every architecture must define this function. It's the fastest | 39 | #include <asm-generic/bitops/ffs.h> |
| 128 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 129 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 130 | * bits is cleared. | ||
| 131 | */ | ||
| 132 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 133 | { | ||
| 134 | if (unlikely(b[0])) | ||
| 135 | return __ffs(b[0]); | ||
| 136 | if (unlikely(((unsigned int)b[1]))) | ||
| 137 | return __ffs(b[1]) + 64; | ||
| 138 | if (b[1] >> 32) | ||
| 139 | return __ffs(b[1] >> 32) + 96; | ||
| 140 | return __ffs(b[2]) + 128; | ||
| 141 | } | ||
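The scheduler bitmap is 140 bits held in three 64-bit words, and the unrolled tests above check them in priority order. A worked example with an assumed bitmap in which only priority 100 is runnable:

static inline int example_sched_scan(void)
{
	/* Bit 100 is bit 36 of b[1]; its low 32 bits are clear, so the
	 * b[1] >> 32 branch fires: __ffs(b[1] >> 32) + 96 = 4 + 96 = 100. */
	unsigned long b[3] = { 0, 1UL << 36, 0 };

	return sched_find_first_bit(b);	/* 100 */
}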
| 142 | |||
| 143 | /* | ||
| 144 | * ffs: find first bit set. This is defined the same way as | ||
| 145 | * the libc and compiler builtin ffs routines, therefore | ||
| 146 | * differs in spirit from the above ffz (man ffs). | ||
| 147 | */ | ||
| 148 | static inline int ffs(int x) | ||
| 149 | { | ||
| 150 | if (!x) | ||
| 151 | return 0; | ||
| 152 | return __ffs((unsigned long)x) + 1; | ||
| 153 | } | ||
| 154 | 40 | ||
| 155 | /* | 41 | /* |
| 156 | * hweightN: returns the hamming weight (i.e. the number | 42 | * hweightN: returns the hamming weight (i.e. the number |
| @@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w) | |||
| 193 | 79 | ||
| 194 | #else | 80 | #else |
| 195 | 81 | ||
| 196 | #define hweight64(x) generic_hweight64(x) | 82 | #include <asm-generic/bitops/hweight.h> |
| 197 | #define hweight32(x) generic_hweight32(x) | ||
| 198 | #define hweight16(x) generic_hweight16(x) | ||
| 199 | #define hweight8(x) generic_hweight8(x) | ||
| 200 | 83 | ||
| 201 | #endif | 84 | #endif |
| 202 | #endif /* __KERNEL__ */ | 85 | #endif /* __KERNEL__ */ |
| 203 | 86 | ||
| 204 | /** | 87 | #include <asm-generic/bitops/find.h> |
| 205 | * find_next_bit - find the next set bit in a memory region | ||
| 206 | * @addr: The address to base the search on | ||
| 207 | * @offset: The bitnumber to start searching at | ||
| 208 | * @size: The maximum size to search | ||
| 209 | */ | ||
| 210 | extern unsigned long find_next_bit(const unsigned long *, unsigned long, | ||
| 211 | unsigned long); | ||
| 212 | |||
| 213 | /** | ||
| 214 | * find_first_bit - find the first set bit in a memory region | ||
| 215 | * @addr: The address to start the search at | ||
| 216 | * @size: The maximum size to search | ||
| 217 | * | ||
| 218 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 219 | * containing a bit. | ||
| 220 | */ | ||
| 221 | #define find_first_bit(addr, size) \ | ||
| 222 | find_next_bit((addr), (size), 0) | ||
| 223 | |||
| 224 | /* find_next_zero_bit() finds the first zero bit in a bit string of length | ||
| 225 | * 'size' bits, starting the search at bit 'offset'. This is largely based | ||
| 226 | * on Linus's ALPHA routines, which are pretty portable BTW. | ||
| 227 | */ | ||
| 228 | |||
| 229 | extern unsigned long find_next_zero_bit(const unsigned long *, | ||
| 230 | unsigned long, unsigned long); | ||
| 231 | |||
| 232 | #define find_first_zero_bit(addr, size) \ | ||
| 233 | find_next_zero_bit((addr), (size), 0) | ||
| 234 | |||
| 235 | #define test_and_set_le_bit(nr,addr) \ | ||
| 236 | test_and_set_bit((nr) ^ 0x38, (addr)) | ||
| 237 | #define test_and_clear_le_bit(nr,addr) \ | ||
| 238 | test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
| 239 | |||
| 240 | static inline int test_le_bit(int nr, __const__ unsigned long * addr) | ||
| 241 | { | ||
| 242 | int mask; | ||
| 243 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
| 244 | |||
| 245 | ADDR += nr >> 3; | ||
| 246 | mask = 1 << (nr & 0x07); | ||
| 247 | return ((mask & *ADDR) != 0); | ||
| 248 | } | ||
| 249 | |||
| 250 | #define find_first_zero_le_bit(addr, size) \ | ||
| 251 | find_next_zero_le_bit((addr), (size), 0) | ||
| 252 | |||
| 253 | extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long); | ||
| 254 | 88 | ||
| 255 | #ifdef __KERNEL__ | 89 | #ifdef __KERNEL__ |
| 256 | 90 | ||
| 257 | #define __set_le_bit(nr, addr) \ | 91 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 258 | __set_bit((nr) ^ 0x38, (addr)) | ||
| 259 | #define __clear_le_bit(nr, addr) \ | ||
| 260 | __clear_bit((nr) ^ 0x38, (addr)) | ||
| 261 | #define __test_and_clear_le_bit(nr, addr) \ | ||
| 262 | __test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
| 263 | #define __test_and_set_le_bit(nr, addr) \ | ||
| 264 | __test_and_set_bit((nr) ^ 0x38, (addr)) | ||
| 265 | 92 | ||
| 266 | #define ext2_set_bit(nr,addr) \ | ||
| 267 | __test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
| 268 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 93 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
| 269 | test_and_set_le_bit((nr),(unsigned long *)(addr)) | 94 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
| 270 | #define ext2_clear_bit(nr,addr) \ | ||
| 271 | __test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
| 272 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 95 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
| 273 | test_and_clear_le_bit((nr),(unsigned long *)(addr)) | 96 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
| 274 | #define ext2_test_bit(nr,addr) \ | ||
| 275 | test_le_bit((nr),(unsigned long *)(addr)) | ||
| 276 | #define ext2_find_first_zero_bit(addr, size) \ | ||
| 277 | find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
| 278 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 279 | find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
| 280 | 97 | ||
| 281 | /* Bitmap functions for the minix filesystem. */ | 98 | #include <asm-generic/bitops/minix.h> |
| 282 | #define minix_test_and_set_bit(nr,addr) \ | ||
| 283 | test_and_set_bit((nr),(unsigned long *)(addr)) | ||
| 284 | #define minix_set_bit(nr,addr) \ | ||
| 285 | set_bit((nr),(unsigned long *)(addr)) | ||
| 286 | #define minix_test_and_clear_bit(nr,addr) \ | ||
| 287 | test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
| 288 | #define minix_test_bit(nr,addr) \ | ||
| 289 | test_bit((nr),(unsigned long *)(addr)) | ||
| 290 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 291 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
| 292 | 99 | ||
| 293 | #endif /* __KERNEL__ */ | 100 | #endif /* __KERNEL__ */ |
| 294 | 101 | ||
diff --git a/include/asm-sparc64/futex.h b/include/asm-sparc64/futex.h index 34c4b43d3f98..cd340a233156 100644 --- a/include/asm-sparc64/futex.h +++ b/include/asm-sparc64/futex.h | |||
| @@ -83,4 +83,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) | |||
| 83 | return ret; | 83 | return ret; |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | static inline int | ||
| 87 | futex_atomic_cmpxchg_inuser(int __user *uaddr, int oldval, int newval) | ||
| 88 | { | ||
| 89 | return -ENOSYS; | ||
| 90 | } | ||
| 91 | |||
| 86 | #endif /* !(_SPARC64_FUTEX_H) */ | 92 | #endif /* !(_SPARC64_FUTEX_H) */ |
diff --git a/include/asm-sparc64/kdebug.h b/include/asm-sparc64/kdebug.h index 6321f5a0198d..4040d127ac3e 100644 --- a/include/asm-sparc64/kdebug.h +++ b/include/asm-sparc64/kdebug.h | |||
| @@ -15,12 +15,9 @@ struct die_args { | |||
| 15 | int signr; | 15 | int signr; |
| 16 | }; | 16 | }; |
| 17 | 17 | ||
| 18 | /* Note - you should never unregister because that can race with NMIs. | 18 | extern int register_die_notifier(struct notifier_block *); |
| 19 | * If you really want to do it first unregister - then synchronize_sched | 19 | extern int unregister_die_notifier(struct notifier_block *); |
| 20 | * - then free. | 20 | extern struct atomic_notifier_head sparc64die_chain; |
| 21 | */ | ||
| 22 | int register_die_notifier(struct notifier_block *nb); | ||
| 23 | extern struct notifier_block *sparc64die_chain; | ||
| 24 | 21 | ||
| 25 | extern void bad_trap(struct pt_regs *, long); | 22 | extern void bad_trap(struct pt_regs *, long); |
| 26 | 23 | ||
| @@ -46,7 +43,7 @@ static inline int notify_die(enum die_val val,char *str, struct pt_regs *regs, | |||
| 46 | .trapnr = trap, | 43 | .trapnr = trap, |
| 47 | .signr = sig }; | 44 | .signr = sig }; |
| 48 | 45 | ||
| 49 | return notifier_call_chain(&sparc64die_chain, val, &args); | 46 | return atomic_notifier_call_chain(&sparc64die_chain, val, &args); |
| 50 | } | 47 | } |
| 51 | 48 | ||
| 52 | #endif | 49 | #endif |
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index 66fe4ac59fd6..aabb21906724 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h | |||
| @@ -111,6 +111,8 @@ typedef unsigned long pgprot_t; | |||
| 111 | (_AC(0x0000000070000000,UL)) : \ | 111 | (_AC(0x0000000070000000,UL)) : \ |
| 112 | (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) | 112 | (_AC(0xfffff80000000000,UL) + (1UL << 32UL))) |
| 113 | 113 | ||
| 114 | #include <asm-generic/memory_model.h> | ||
| 115 | |||
| 114 | #endif /* !(__ASSEMBLY__) */ | 116 | #endif /* !(__ASSEMBLY__) */ |
| 115 | 117 | ||
| 116 | /* to align the pointer to the (next) page boundary */ | 118 | /* to align the pointer to the (next) page boundary */ |
diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 0229814af31e..41364330aff1 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h | |||
| @@ -106,9 +106,6 @@ extern unsigned long uml_physmem; | |||
| 106 | #define __pa(virt) to_phys((void *) (unsigned long) (virt)) | 106 | #define __pa(virt) to_phys((void *) (unsigned long) (virt)) |
| 107 | #define __va(phys) to_virt((unsigned long) (phys)) | 107 | #define __va(phys) to_virt((unsigned long) (phys)) |
| 108 | 108 | ||
| 109 | #define page_to_pfn(page) ((page) - mem_map) | ||
| 110 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 111 | |||
| 112 | #define phys_to_pfn(p) ((p) >> PAGE_SHIFT) | 109 | #define phys_to_pfn(p) ((p) >> PAGE_SHIFT) |
| 113 | #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) | 110 | #define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT) |
| 114 | 111 | ||
| @@ -121,6 +118,7 @@ extern struct page *arch_validate(struct page *page, gfp_t mask, int order); | |||
| 121 | extern void arch_free_page(struct page *page, int order); | 118 | extern void arch_free_page(struct page *page, int order); |
| 122 | #define HAVE_ARCH_FREE_PAGE | 119 | #define HAVE_ARCH_FREE_PAGE |
| 123 | 120 | ||
| 121 | #include <asm-generic/memory_model.h> | ||
| 124 | #include <asm-generic/page.h> | 122 | #include <asm-generic/page.h> |
| 125 | 123 | ||
| 126 | #endif | 124 | #endif |
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h index 2ee028b8de9d..4e460d6f5ac8 100644 --- a/include/asm-um/uaccess.h +++ b/include/asm-um/uaccess.h | |||
| @@ -41,16 +41,16 @@ | |||
| 41 | 41 | ||
| 42 | #define __get_user(x, ptr) \ | 42 | #define __get_user(x, ptr) \ |
| 43 | ({ \ | 43 | ({ \ |
| 44 | const __typeof__(ptr) __private_ptr = ptr; \ | 44 | const __typeof__(ptr) __private_ptr = ptr; \ |
| 45 | __typeof__(*(__private_ptr)) __private_val; \ | 45 | __typeof__(x) __private_val; \ |
| 46 | int __private_ret = -EFAULT; \ | 46 | int __private_ret = -EFAULT; \ |
| 47 | (x) = (__typeof__(*(__private_ptr)))0; \ | 47 | (x) = (__typeof__(*(__private_ptr)))0; \ |
| 48 | if (__copy_from_user(&__private_val, (__private_ptr), \ | 48 | if (__copy_from_user((void *) &__private_val, (__private_ptr), \ |
| 49 | sizeof(*(__private_ptr))) == 0) {\ | 49 | sizeof(*(__private_ptr))) == 0) { \ |
| 50 | (x) = (__typeof__(*(__private_ptr))) __private_val; \ | 50 | (x) = (__typeof__(*(__private_ptr))) __private_val; \ |
| 51 | __private_ret = 0; \ | 51 | __private_ret = 0; \ |
| 52 | } \ | 52 | } \ |
| 53 | __private_ret; \ | 53 | __private_ret; \ |
| 54 | }) | 54 | }) |
| 55 | 55 | ||
| 56 | #define get_user(x, ptr) \ | 56 | #define get_user(x, ptr) \ |
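__get_user copies one object of the pointee's type from user space, zeroes the destination up front and reports -EFAULT on a faulting access; the change above ties the temporary's type to the destination instead of the pointer. A typical call site looks roughly like this sketch (function and variable names assumed):

static int example_read_flag(int __user *uptr)
{
	int flag;

	/* 0 on success with 'flag' filled in; -EFAULT on a bad pointer. */
	if (get_user(flag, uptr))
		return -EFAULT;

	return flag != 0;
}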
| @@ -89,14 +89,3 @@ struct exception_table_entry | |||
| 89 | }; | 89 | }; |
| 90 | 90 | ||
| 91 | #endif | 91 | #endif |
| 92 | |||
| 93 | /* | ||
| 94 | * Overrides for Emacs so that we follow Linus's tabbing style. | ||
| 95 | * Emacs will notice this stuff at the end of the file and automatically | ||
| 96 | * adjust the settings for this buffer only. This must remain at the end | ||
| 97 | * of the file. | ||
| 98 | * --------------------------------------------------------------------------- | ||
| 99 | * Local variables: | ||
| 100 | * c-file-style: "linux" | ||
| 101 | * End: | ||
| 102 | */ | ||
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h index 609b9e87222a..1f6fd5ab4177 100644 --- a/include/asm-v850/bitops.h +++ b/include/asm-v850/bitops.h | |||
| @@ -22,25 +22,11 @@ | |||
| 22 | 22 | ||
| 23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
| 24 | 24 | ||
| 25 | /* | 25 | #include <asm-generic/bitops/ffz.h> |
| 26 | * The __ functions are not atomic | ||
| 27 | */ | ||
| 28 | 26 | ||
| 29 | /* | 27 | /* |
| 30 | * ffz = Find First Zero in word. Undefined if no zero exists, | 28 | * The __ functions are not atomic |
| 31 | * so code should check against ~0UL first.. | ||
| 32 | */ | 29 | */ |
| 33 | static inline unsigned long ffz (unsigned long word) | ||
| 34 | { | ||
| 35 | unsigned long result = 0; | ||
| 36 | |||
| 37 | while (word & 1) { | ||
| 38 | result++; | ||
| 39 | word >>= 1; | ||
| 40 | } | ||
| 41 | return result; | ||
| 42 | } | ||
| 43 | |||
| 44 | 30 | ||
| 45 | /* In the following constant-bit-op macros, a "g" constraint is used when | 31 | /* In the following constant-bit-op macros, a "g" constraint is used when |
| 46 | we really need an integer ("i" constraint). This is to avoid | 32 | we really need an integer ("i" constraint). This is to avoid |
| @@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr) | |||
| 153 | #define smp_mb__before_clear_bit() barrier () | 139 | #define smp_mb__before_clear_bit() barrier () |
| 154 | #define smp_mb__after_clear_bit() barrier () | 140 | #define smp_mb__after_clear_bit() barrier () |
| 155 | 141 | ||
| 142 | #include <asm-generic/bitops/ffs.h> | ||
| 143 | #include <asm-generic/bitops/fls.h> | ||
| 144 | #include <asm-generic/bitops/fls64.h> | ||
| 145 | #include <asm-generic/bitops/__ffs.h> | ||
| 146 | #include <asm-generic/bitops/find.h> | ||
| 147 | #include <asm-generic/bitops/sched.h> | ||
| 148 | #include <asm-generic/bitops/hweight.h> | ||
| 156 | 149 | ||
| 157 | #define find_first_zero_bit(addr, size) \ | 150 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 158 | find_next_zero_bit ((addr), (size), 0) | ||
| 159 | |||
| 160 | static inline int find_next_zero_bit(const void *addr, int size, int offset) | ||
| 161 | { | ||
| 162 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 163 | unsigned long result = offset & ~31UL; | ||
| 164 | unsigned long tmp; | ||
| 165 | |||
| 166 | if (offset >= size) | ||
| 167 | return size; | ||
| 168 | size -= result; | ||
| 169 | offset &= 31UL; | ||
| 170 | if (offset) { | ||
| 171 | tmp = * (p++); | ||
| 172 | tmp |= ~0UL >> (32-offset); | ||
| 173 | if (size < 32) | ||
| 174 | goto found_first; | ||
| 175 | if (~tmp) | ||
| 176 | goto found_middle; | ||
| 177 | size -= 32; | ||
| 178 | result += 32; | ||
| 179 | } | ||
| 180 | while (size & ~31UL) { | ||
| 181 | if (~ (tmp = * (p++))) | ||
| 182 | goto found_middle; | ||
| 183 | result += 32; | ||
| 184 | size -= 32; | ||
| 185 | } | ||
| 186 | if (!size) | ||
| 187 | return result; | ||
| 188 | tmp = *p; | ||
| 189 | |||
| 190 | found_first: | ||
| 191 | tmp |= ~0UL << size; | ||
| 192 | found_middle: | ||
| 193 | return result + ffz (tmp); | ||
| 194 | } | ||
| 195 | |||
| 196 | |||
| 197 | /* This is the same as generic_ffs, but we can't use that because it's | ||
| 198 | inline and the #include order mucks things up. */ | ||
| 199 | static inline int generic_ffs_for_find_next_bit(int x) | ||
| 200 | { | ||
| 201 | int r = 1; | ||
| 202 | |||
| 203 | if (!x) | ||
| 204 | return 0; | ||
| 205 | if (!(x & 0xffff)) { | ||
| 206 | x >>= 16; | ||
| 207 | r += 16; | ||
| 208 | } | ||
| 209 | if (!(x & 0xff)) { | ||
| 210 | x >>= 8; | ||
| 211 | r += 8; | ||
| 212 | } | ||
| 213 | if (!(x & 0xf)) { | ||
| 214 | x >>= 4; | ||
| 215 | r += 4; | ||
| 216 | } | ||
| 217 | if (!(x & 3)) { | ||
| 218 | x >>= 2; | ||
| 219 | r += 2; | ||
| 220 | } | ||
| 221 | if (!(x & 1)) { | ||
| 222 | x >>= 1; | ||
| 223 | r += 1; | ||
| 224 | } | ||
| 225 | return r; | ||
| 226 | } | ||
| 227 | |||
| 228 | /* | ||
| 229 | * Find next one bit in a bitmap reasonably efficiently. | ||
| 230 | */ | ||
| 231 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
| 232 | unsigned long size, unsigned long offset) | ||
| 233 | { | ||
| 234 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
| 235 | unsigned int result = offset & ~31UL; | ||
| 236 | unsigned int tmp; | ||
| 237 | |||
| 238 | if (offset >= size) | ||
| 239 | return size; | ||
| 240 | size -= result; | ||
| 241 | offset &= 31UL; | ||
| 242 | if (offset) { | ||
| 243 | tmp = *p++; | ||
| 244 | tmp &= ~0UL << offset; | ||
| 245 | if (size < 32) | ||
| 246 | goto found_first; | ||
| 247 | if (tmp) | ||
| 248 | goto found_middle; | ||
| 249 | size -= 32; | ||
| 250 | result += 32; | ||
| 251 | } | ||
| 252 | while (size >= 32) { | ||
| 253 | if ((tmp = *p++) != 0) | ||
| 254 | goto found_middle; | ||
| 255 | result += 32; | ||
| 256 | size -= 32; | ||
| 257 | } | ||
| 258 | if (!size) | ||
| 259 | return result; | ||
| 260 | tmp = *p; | ||
| 261 | |||
| 262 | found_first: | ||
| 263 | tmp &= ~0UL >> (32 - size); | ||
| 264 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 265 | return result + size; /* Nope. */ | ||
| 266 | found_middle: | ||
| 267 | return result + generic_ffs_for_find_next_bit(tmp); | ||
| 268 | } | ||
| 269 | |||
| 270 | /* | ||
| 271 | * find_first_bit - find the first set bit in a memory region | ||
| 272 | */ | ||
| 273 | #define find_first_bit(addr, size) \ | ||
| 274 | find_next_bit((addr), (size), 0) | ||
| 275 | |||
| 276 | |||
| 277 | #define ffs(x) generic_ffs (x) | ||
| 278 | #define fls(x) generic_fls (x) | ||
| 279 | #define fls64(x) generic_fls64(x) | ||
| 280 | #define __ffs(x) ffs(x) | ||
| 281 | |||
| 282 | |||
| 283 | /* | ||
| 284 | * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes | ||
| 285 | * that at least one bit is set, and returns the real index of the bit | ||
| 286 | * (rather than the bit index + 1, like ffs does). | ||
| 287 | */ | ||
| 288 | static inline int sched_ffs(int x) | ||
| 289 | { | ||
| 290 | int r = 0; | ||
| 291 | |||
| 292 | if (!(x & 0xffff)) { | ||
| 293 | x >>= 16; | ||
| 294 | r += 16; | ||
| 295 | } | ||
| 296 | if (!(x & 0xff)) { | ||
| 297 | x >>= 8; | ||
| 298 | r += 8; | ||
| 299 | } | ||
| 300 | if (!(x & 0xf)) { | ||
| 301 | x >>= 4; | ||
| 302 | r += 4; | ||
| 303 | } | ||
| 304 | if (!(x & 3)) { | ||
| 305 | x >>= 2; | ||
| 306 | r += 2; | ||
| 307 | } | ||
| 308 | if (!(x & 1)) { | ||
| 309 | x >>= 1; | ||
| 310 | r += 1; | ||
| 311 | } | ||
| 312 | return r; | ||
| 313 | } | ||
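sched_ffs is the same binary search as the generic ffs minus the +1 bias, so for a word known to be non-zero it yields the plain 0-based bit index. A small assumed example:

static inline int example_sched_ffs_bias(void)
{
	int w = 0x90;		/* bits 4 and 7 set */

	/* sched_ffs(w) == 4, whereas the libc-style ffs(w) would return 5. */
	return sched_ffs(w);
}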
| 314 | |||
| 315 | /* | ||
| 316 | * Every architecture must define this function. It's the fastest | ||
| 317 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
| 318 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
| 319 | * bits is set. | ||
| 320 | */ | ||
| 321 | static inline int sched_find_first_bit(unsigned long *b) | ||
| 322 | { | ||
| 323 | unsigned offs = 0; | ||
| 324 | while (! *b) { | ||
| 325 | b++; | ||
| 326 | offs += 32; | ||
| 327 | } | ||
| 328 | return sched_ffs (*b) + offs; | ||
| 329 | } | ||
| 330 | |||
| 331 | /* | ||
| 332 | * hweightN: returns the hamming weight (i.e. the number | ||
| 333 | * of bits set) of a N-bit word | ||
| 334 | */ | ||
| 335 | #define hweight32(x) generic_hweight32 (x) | ||
| 336 | #define hweight16(x) generic_hweight16 (x) | ||
| 337 | #define hweight8(x) generic_hweight8 (x) | ||
| 338 | |||
| 339 | #define ext2_set_bit test_and_set_bit | ||
| 340 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 151 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
| 341 | #define ext2_clear_bit test_and_clear_bit | ||
| 342 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 152 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
| 343 | #define ext2_test_bit test_bit | ||
| 344 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
| 345 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
| 346 | 153 | ||
| 347 | /* Bitmap functions for the minix filesystem. */ | 154 | #include <asm-generic/bitops/minix.h> |
| 348 | #define minix_test_and_set_bit test_and_set_bit | ||
| 349 | #define minix_set_bit set_bit | ||
| 350 | #define minix_test_and_clear_bit test_and_clear_bit | ||
| 351 | #define minix_test_bit test_bit | ||
| 352 | #define minix_find_first_zero_bit find_first_zero_bit | ||
| 353 | 155 | ||
| 354 | #endif /* __KERNEL__ */ | 156 | #endif /* __KERNEL__ */ |
| 355 | 157 | ||
diff --git a/include/asm-v850/page.h b/include/asm-v850/page.h index b4bc85e7b91a..ad03c46a1f92 100644 --- a/include/asm-v850/page.h +++ b/include/asm-v850/page.h | |||
| @@ -111,8 +111,7 @@ typedef unsigned long pgprot_t; | |||
| 111 | #define page_to_virt(page) \ | 111 | #define page_to_virt(page) \ |
| 112 | ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) | 112 | ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET) |
| 113 | 113 | ||
| 114 | #define pfn_to_page(pfn) virt_to_page (pfn_to_virt (pfn)) | 114 | #define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) |
| 115 | #define page_to_pfn(page) virt_to_pfn (page_to_virt (page)) | ||
| 116 | #define pfn_valid(pfn) ((pfn) < max_mapnr) | 115 | #define pfn_valid(pfn) ((pfn) < max_mapnr) |
| 117 | 116 | ||
| 118 | #define virt_addr_valid(kaddr) \ | 117 | #define virt_addr_valid(kaddr) \ |
| @@ -125,6 +124,7 @@ typedef unsigned long pgprot_t; | |||
| 125 | 124 | ||
| 126 | #endif /* KERNEL */ | 125 | #endif /* KERNEL */ |
| 127 | 126 | ||
| 127 | #include <asm-generic/memory_model.h> | ||
| 128 | #include <asm-generic/page.h> | 128 | #include <asm-generic/page.h> |
| 129 | 129 | ||
| 130 | #endif /* __V850_PAGE_H__ */ | 130 | #endif /* __V850_PAGE_H__ */ |
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index eb4df23e1e41..79212128d0f7 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h | |||
| @@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word) | |||
| 356 | 356 | ||
| 357 | #ifdef __KERNEL__ | 357 | #ifdef __KERNEL__ |
| 358 | 358 | ||
| 359 | static inline int sched_find_first_bit(const unsigned long *b) | 359 | #include <asm-generic/bitops/sched.h> |
| 360 | { | ||
| 361 | if (b[0]) | ||
| 362 | return __ffs(b[0]); | ||
| 363 | if (b[1]) | ||
| 364 | return __ffs(b[1]) + 64; | ||
| 365 | return __ffs(b[2]) + 128; | ||
| 366 | } | ||
| 367 | 360 | ||
| 368 | /** | 361 | /** |
| 369 | * ffs - find first bit set | 362 | * ffs - find first bit set |
| @@ -412,43 +405,20 @@ static __inline__ int fls(int x) | |||
| 412 | return r+1; | 405 | return r+1; |
| 413 | } | 406 | } |
| 414 | 407 | ||
| 415 | /** | 408 | #include <asm-generic/bitops/hweight.h> |
| 416 | * hweightN - returns the hamming weight of a N-bit word | ||
| 417 | * @x: the word to weigh | ||
| 418 | * | ||
| 419 | * The Hamming Weight of a number is the total number of bits set in it. | ||
| 420 | */ | ||
| 421 | |||
| 422 | #define hweight64(x) generic_hweight64(x) | ||
| 423 | #define hweight32(x) generic_hweight32(x) | ||
| 424 | #define hweight16(x) generic_hweight16(x) | ||
| 425 | #define hweight8(x) generic_hweight8(x) | ||
| 426 | 409 | ||
| 427 | #endif /* __KERNEL__ */ | 410 | #endif /* __KERNEL__ */ |
| 428 | 411 | ||
| 429 | #ifdef __KERNEL__ | 412 | #ifdef __KERNEL__ |
| 430 | 413 | ||
| 431 | #define ext2_set_bit(nr,addr) \ | 414 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 432 | __test_and_set_bit((nr),(unsigned long*)addr) | 415 | |
| 433 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 416 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
| 434 | test_and_set_bit((nr),(unsigned long*)addr) | 417 | test_and_set_bit((nr),(unsigned long*)addr) |
| 435 | #define ext2_clear_bit(nr, addr) \ | ||
| 436 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
| 437 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 418 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
| 438 | test_and_clear_bit((nr),(unsigned long*)addr) | 419 | test_and_clear_bit((nr),(unsigned long*)addr) |
| 439 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | 420 | |
| 440 | #define ext2_find_first_zero_bit(addr, size) \ | 421 | #include <asm-generic/bitops/minix.h> |
| 441 | find_first_zero_bit((unsigned long*)addr, size) | ||
| 442 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
| 443 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
| 444 | |||
| 445 | /* Bitmap functions for the minix filesystem. */ | ||
| 446 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
| 447 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
| 448 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
| 449 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
| 450 | #define minix_find_first_zero_bit(addr,size) \ | ||
| 451 | find_first_zero_bit((void*)addr,size) | ||
| 452 | 422 | ||
| 453 | #endif /* __KERNEL__ */ | 423 | #endif /* __KERNEL__ */ |
| 454 | 424 | ||
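asm-generic/bitops/hweight.h supplies the population-count helpers that the removed generic_hweight* aliases used to provide. For reference, the usual mask-and-fold form of a 32-bit popcount looks roughly like this sketch (not the header's exact code):

static inline unsigned int example_hweight32(unsigned int w)
{
	w = w - ((w >> 1) & 0x55555555);		/* 2-bit sums  */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums  */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* byte sums   */
	return (w * 0x01010101) >> 24;			/* fold to top */
}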
diff --git a/include/asm-x86_64/futex.h b/include/asm-x86_64/futex.h index 8602c09bf89e..9804bf07b092 100644 --- a/include/asm-x86_64/futex.h +++ b/include/asm-x86_64/futex.h | |||
| @@ -94,5 +94,32 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
| 94 | return ret; | 94 | return ret; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | static inline int | ||
| 98 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | ||
| 99 | { | ||
| 100 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | ||
| 101 | return -EFAULT; | ||
| 102 | |||
| 103 | __asm__ __volatile__( | ||
| 104 | "1: " LOCK_PREFIX "cmpxchgl %3, %1 \n" | ||
| 105 | |||
| 106 | "2: .section .fixup, \"ax\" \n" | ||
| 107 | "3: mov %2, %0 \n" | ||
| 108 | " jmp 2b \n" | ||
| 109 | " .previous \n" | ||
| 110 | |||
| 111 | " .section __ex_table, \"a\" \n" | ||
| 112 | " .align 8 \n" | ||
| 113 | " .quad 1b,3b \n" | ||
| 114 | " .previous \n" | ||
| 115 | |||
| 116 | : "=a" (oldval), "=m" (*uaddr) | ||
| 117 | : "i" (-EFAULT), "r" (newval), "0" (oldval) | ||
| 118 | : "memory" | ||
| 119 | ); | ||
| 120 | |||
| 121 | return oldval; | ||
| 122 | } | ||
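The helper above performs a LOCK-prefixed cmpxchg on the user word: newval is stored only if the current value equals oldval, the value actually found is returned, and a fault is turned into -EFAULT via the exception table. Ignoring atomicity and the fixup path, the semantics are roughly this plain-C sketch:

static inline int example_cmpxchg_semantics(int *uaddr, int oldval, int newval)
{
	int cur = *uaddr;

	if (cur == oldval)
		*uaddr = newval;
	return cur;		/* caller checks cur == oldval for success */
}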
| 123 | |||
| 97 | #endif | 124 | #endif |
| 98 | #endif | 125 | #endif |
diff --git a/include/asm-x86_64/kdebug.h b/include/asm-x86_64/kdebug.h index b9ed4c0c8783..cf795631d9b4 100644 --- a/include/asm-x86_64/kdebug.h +++ b/include/asm-x86_64/kdebug.h | |||
| @@ -5,21 +5,20 @@ | |||
| 5 | 5 | ||
| 6 | struct pt_regs; | 6 | struct pt_regs; |
| 7 | 7 | ||
| 8 | struct die_args { | 8 | struct die_args { |
| 9 | struct pt_regs *regs; | 9 | struct pt_regs *regs; |
| 10 | const char *str; | 10 | const char *str; |
| 11 | long err; | 11 | long err; |
| 12 | int trapnr; | 12 | int trapnr; |
| 13 | int signr; | 13 | int signr; |
| 14 | }; | 14 | }; |
| 15 | |||
| 16 | extern int register_die_notifier(struct notifier_block *); | ||
| 17 | extern int unregister_die_notifier(struct notifier_block *); | ||
| 18 | extern struct atomic_notifier_head die_chain; | ||
| 15 | 19 | ||
| 16 | /* Note - you should never unregister because that can race with NMIs. | ||
| 17 | If you really want to do it first unregister - then synchronize_sched - then free. | ||
| 18 | */ | ||
| 19 | int register_die_notifier(struct notifier_block *nb); | ||
| 20 | extern struct notifier_block *die_chain; | ||
| 21 | /* Grossly misnamed. */ | 20 | /* Grossly misnamed. */ |
| 22 | enum die_val { | 21 | enum die_val { |
| 23 | DIE_OOPS = 1, | 22 | DIE_OOPS = 1, |
| 24 | DIE_INT3, | 23 | DIE_INT3, |
| 25 | DIE_DEBUG, | 24 | DIE_DEBUG, |
| @@ -33,8 +32,8 @@ enum die_val { | |||
| 33 | DIE_CALL, | 32 | DIE_CALL, |
| 34 | DIE_NMI_IPI, | 33 | DIE_NMI_IPI, |
| 35 | DIE_PAGE_FAULT, | 34 | DIE_PAGE_FAULT, |
| 36 | }; | 35 | }; |
| 37 | 36 | ||
| 38 | static inline int notify_die(enum die_val val, const char *str, | 37 | static inline int notify_die(enum die_val val, const char *str, |
| 39 | struct pt_regs *regs, long err, int trap, int sig) | 38 | struct pt_regs *regs, long err, int trap, int sig) |
| 40 | { | 39 | { |
| @@ -45,7 +44,7 @@ static inline int notify_die(enum die_val val, const char *str, | |||
| 45 | .trapnr = trap, | 44 | .trapnr = trap, |
| 46 | .signr = sig | 45 | .signr = sig |
| 47 | }; | 46 | }; |
| 48 | return notifier_call_chain(&die_chain, val, &args); | 47 | return atomic_notifier_call_chain(&die_chain, val, &args); |
| 49 | } | 48 | } |
| 50 | 49 | ||
| 51 | extern int printk_address(unsigned long address); | 50 | extern int printk_address(unsigned long address); |
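With die_chain converted to an atomic_notifier_head, callers still attach handlers through register_die_notifier; only the call-chain primitive used by notify_die changes. A hedged sketch of a registration site (handler name and the printed fields are assumed):

static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS)
		printk("die: %s, err %ld\n", args->str, args->err);
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call	= example_die_handler,
};

/* at init time: register_die_notifier(&example_die_nb); */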
diff --git a/include/asm-x86_64/mmzone.h b/include/asm-x86_64/mmzone.h index 937f99b26883..6b18cd8f293d 100644 --- a/include/asm-x86_64/mmzone.h +++ b/include/asm-x86_64/mmzone.h | |||
| @@ -44,12 +44,8 @@ static inline __attribute__((pure)) int phys_to_nid(unsigned long addr) | |||
| 44 | #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) | 44 | #define pfn_to_nid(pfn) phys_to_nid((unsigned long)(pfn) << PAGE_SHIFT) |
| 45 | #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) | 45 | #define kvaddr_to_nid(kaddr) phys_to_nid(__pa(kaddr)) |
| 46 | 46 | ||
| 47 | extern struct page *pfn_to_page(unsigned long pfn); | ||
| 48 | extern unsigned long page_to_pfn(struct page *page); | ||
| 49 | extern int pfn_valid(unsigned long pfn); | 47 | extern int pfn_valid(unsigned long pfn); |
| 50 | #endif | 48 | #endif |
| 51 | 49 | ||
| 52 | #define local_mapnr(kvaddr) \ | ||
| 53 | ( (__pa(kvaddr) >> PAGE_SHIFT) - node_start_pfn(kvaddr_to_nid(kvaddr)) ) | ||
| 54 | #endif | 50 | #endif |
| 55 | #endif | 51 | #endif |
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h index 615e3e494929..408185bac351 100644 --- a/include/asm-x86_64/page.h +++ b/include/asm-x86_64/page.h | |||
| @@ -123,8 +123,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 123 | #define __boot_va(x) __va(x) | 123 | #define __boot_va(x) __va(x) |
| 124 | #define __boot_pa(x) __pa(x) | 124 | #define __boot_pa(x) __pa(x) |
| 125 | #ifdef CONFIG_FLATMEM | 125 | #ifdef CONFIG_FLATMEM |
| 126 | #define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 127 | #define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 128 | #define pfn_valid(pfn) ((pfn) < end_pfn) | 126 | #define pfn_valid(pfn) ((pfn) < end_pfn) |
| 129 | #endif | 127 | #endif |
| 130 | 128 | ||
| @@ -140,6 +138,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 140 | 138 | ||
| 141 | #endif /* __KERNEL__ */ | 139 | #endif /* __KERNEL__ */ |
| 142 | 140 | ||
| 141 | #include <asm-generic/memory_model.h> | ||
| 143 | #include <asm-generic/page.h> | 142 | #include <asm-generic/page.h> |
| 144 | 143 | ||
| 145 | #endif /* _X86_64_PAGE_H */ | 144 | #endif /* _X86_64_PAGE_H */ |
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h index 8c8d88c036ed..1aa2cee43344 100644 --- a/include/asm-x86_64/processor.h +++ b/include/asm-x86_64/processor.h | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <asm/mmsegment.h> | 20 | #include <asm/mmsegment.h> |
| 21 | #include <asm/percpu.h> | 21 | #include <asm/percpu.h> |
| 22 | #include <linux/personality.h> | 22 | #include <linux/personality.h> |
| 23 | #include <linux/cpumask.h> | ||
| 23 | 24 | ||
| 24 | #define TF_MASK 0x00000100 | 25 | #define TF_MASK 0x00000100 |
| 25 | #define IF_MASK 0x00000200 | 26 | #define IF_MASK 0x00000200 |
| @@ -65,6 +66,9 @@ struct cpuinfo_x86 { | |||
| 65 | __u32 x86_power; | 66 | __u32 x86_power; |
| 66 | __u32 extended_cpuid_level; /* Max extended CPUID function supported */ | 67 | __u32 extended_cpuid_level; /* Max extended CPUID function supported */ |
| 67 | unsigned long loops_per_jiffy; | 68 | unsigned long loops_per_jiffy; |
| 69 | #ifdef CONFIG_SMP | ||
| 70 | cpumask_t llc_shared_map; /* cpus sharing the last level cache */ | ||
| 71 | #endif | ||
| 68 | __u8 apicid; | 72 | __u8 apicid; |
| 69 | __u8 booted_cores; /* number of cores as seen by OS */ | 73 | __u8 booted_cores; /* number of cores as seen by OS */ |
| 70 | } ____cacheline_aligned; | 74 | } ____cacheline_aligned; |
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h index 9ccbb2cfd5c0..a4fdaeb5c397 100644 --- a/include/asm-x86_64/smp.h +++ b/include/asm-x86_64/smp.h | |||
| @@ -56,6 +56,7 @@ extern cpumask_t cpu_sibling_map[NR_CPUS]; | |||
| 56 | extern cpumask_t cpu_core_map[NR_CPUS]; | 56 | extern cpumask_t cpu_core_map[NR_CPUS]; |
| 57 | extern u8 phys_proc_id[NR_CPUS]; | 57 | extern u8 phys_proc_id[NR_CPUS]; |
| 58 | extern u8 cpu_core_id[NR_CPUS]; | 58 | extern u8 cpu_core_id[NR_CPUS]; |
| 59 | extern u8 cpu_llc_id[NR_CPUS]; | ||
| 59 | 60 | ||
| 60 | #define SMP_TRAMPOLINE_BASE 0x6000 | 61 | #define SMP_TRAMPOLINE_BASE 0x6000 |
| 61 | 62 | ||
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h index c642f5d9882d..9db54e9d17bb 100644 --- a/include/asm-x86_64/topology.h +++ b/include/asm-x86_64/topology.h | |||
| @@ -68,4 +68,6 @@ extern int __node_distance(int, int); | |||
| 68 | 68 | ||
| 69 | #include <asm-generic/topology.h> | 69 | #include <asm-generic/topology.h> |
| 70 | 70 | ||
| 71 | extern cpumask_t cpu_coregroup_map(int cpu); | ||
| 72 | |||
| 71 | #endif | 73 | #endif |
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index da0341c57949..fcc516353087 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h | |||
| @@ -605,8 +605,12 @@ __SYSCALL(__NR_pselect6, sys_ni_syscall) /* for now */ | |||
| 605 | __SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ | 605 | __SYSCALL(__NR_ppoll, sys_ni_syscall) /* for now */ |
| 606 | #define __NR_unshare 272 | 606 | #define __NR_unshare 272 |
| 607 | __SYSCALL(__NR_unshare, sys_unshare) | 607 | __SYSCALL(__NR_unshare, sys_unshare) |
| 608 | #define __NR_set_robust_list 273 | ||
| 609 | __SYSCALL(__NR_set_robust_list, sys_set_robust_list) | ||
| 610 | #define __NR_get_robust_list 274 | ||
| 611 | __SYSCALL(__NR_get_robust_list, sys_get_robust_list) | ||
| 608 | 612 | ||
| 609 | #define __NR_syscall_max __NR_unshare | 613 | #define __NR_syscall_max __NR_get_robust_list |
| 610 | 614 | ||
| 611 | #ifndef __NO_STUBS | 615 | #ifndef __NO_STUBS |
| 612 | 616 | ||
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h index 0a2065f1a372..d815649617aa 100644 --- a/include/asm-xtensa/bitops.h +++ b/include/asm-xtensa/bitops.h | |||
| @@ -23,156 +23,11 @@ | |||
| 23 | # error SMP not supported on this architecture | 23 | # error SMP not supported on this architecture |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | static __inline__ void set_bit(int nr, volatile void * addr) | ||
| 27 | { | ||
| 28 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 29 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 30 | unsigned long flags; | ||
| 31 | |||
| 32 | local_irq_save(flags); | ||
| 33 | *a |= mask; | ||
| 34 | local_irq_restore(flags); | ||
| 35 | } | ||
| 36 | |||
| 37 | static __inline__ void __set_bit(int nr, volatile unsigned long * addr) | ||
| 38 | { | ||
| 39 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 40 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 41 | |||
| 42 | *a |= mask; | ||
| 43 | } | ||
| 44 | |||
| 45 | static __inline__ void clear_bit(int nr, volatile void * addr) | ||
| 46 | { | ||
| 47 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 48 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 49 | unsigned long flags; | ||
| 50 | |||
| 51 | local_irq_save(flags); | ||
| 52 | *a &= ~mask; | ||
| 53 | local_irq_restore(flags); | ||
| 54 | } | ||
| 55 | |||
| 56 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) | ||
| 57 | { | ||
| 58 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 59 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 60 | |||
| 61 | *a &= ~mask; | ||
| 62 | } | ||
| 63 | |||
| 64 | /* | ||
| 65 | * clear_bit() doesn't provide any barrier for the compiler. | ||
| 66 | */ | ||
| 67 | |||
| 68 | #define smp_mb__before_clear_bit() barrier() | 26 | #define smp_mb__before_clear_bit() barrier() |
| 69 | #define smp_mb__after_clear_bit() barrier() | 27 | #define smp_mb__after_clear_bit() barrier() |
| 70 | 28 | ||
| 71 | static __inline__ void change_bit(int nr, volatile void * addr) | 29 | #include <asm-generic/bitops/atomic.h> |
| 72 | { | 30 | #include <asm-generic/bitops/non-atomic.h> |
| 73 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 74 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 75 | unsigned long flags; | ||
| 76 | |||
| 77 | local_irq_save(flags); | ||
| 78 | *a ^= mask; | ||
| 79 | local_irq_restore(flags); | ||
| 80 | } | ||
| 81 | |||
| 82 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
| 83 | { | ||
| 84 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 85 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 86 | |||
| 87 | *a ^= mask; | ||
| 88 | } | ||
| 89 | |||
| 90 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | ||
| 91 | { | ||
| 92 | unsigned long retval; | ||
| 93 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 94 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 95 | unsigned long flags; | ||
| 96 | |||
| 97 | local_irq_save(flags); | ||
| 98 | retval = (mask & *a) != 0; | ||
| 99 | *a |= mask; | ||
| 100 | local_irq_restore(flags); | ||
| 101 | |||
| 102 | return retval; | ||
| 103 | } | ||
| 104 | |||
| 105 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
| 106 | { | ||
| 107 | unsigned long retval; | ||
| 108 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 109 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 110 | |||
| 111 | retval = (mask & *a) != 0; | ||
| 112 | *a |= mask; | ||
| 113 | |||
| 114 | return retval; | ||
| 115 | } | ||
| 116 | |||
| 117 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | ||
| 118 | { | ||
| 119 | unsigned long retval; | ||
| 120 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 121 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 122 | unsigned long flags; | ||
| 123 | |||
| 124 | local_irq_save(flags); | ||
| 125 | retval = (mask & *a) != 0; | ||
| 126 | *a &= ~mask; | ||
| 127 | local_irq_restore(flags); | ||
| 128 | |||
| 129 | return retval; | ||
| 130 | } | ||
| 131 | |||
| 132 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
| 133 | { | ||
| 134 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 135 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 136 | unsigned long old = *a; | ||
| 137 | |||
| 138 | *a = old & ~mask; | ||
| 139 | return (old & mask) != 0; | ||
| 140 | } | ||
| 141 | |||
| 142 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | ||
| 143 | { | ||
| 144 | unsigned long retval; | ||
| 145 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 146 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 147 | unsigned long flags; | ||
| 148 | |||
| 149 | local_irq_save(flags); | ||
| 150 | |||
| 151 | retval = (mask & *a) != 0; | ||
| 152 | *a ^= mask; | ||
| 153 | local_irq_restore(flags); | ||
| 154 | |||
| 155 | return retval; | ||
| 156 | } | ||
| 157 | |||
| 158 | /* | ||
| 159 | * non-atomic version; can be reordered | ||
| 160 | */ | ||
| 161 | |||
| 162 | static __inline__ int __test_and_change_bit(int nr, volatile void *addr) | ||
| 163 | { | ||
| 164 | unsigned long mask = 1 << (nr & 0x1f); | ||
| 165 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
| 166 | unsigned long old = *a; | ||
| 167 | |||
| 168 | *a = old ^ mask; | ||
| 169 | return (old & mask) != 0; | ||
| 170 | } | ||
| 171 | |||
| 172 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
| 173 | { | ||
| 174 | return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); | ||
| 175 | } | ||
| 176 | 31 | ||
| 177 | #if XCHAL_HAVE_NSA | 32 | #if XCHAL_HAVE_NSA |
| 178 | 33 | ||
| @@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x) | |||
| 245 | { | 100 | { |
| 246 | return __cntlz(x); | 101 | return __cntlz(x); |
| 247 | } | 102 | } |
| 248 | #define fls64(x) generic_fls64(x) | 103 | #include <asm-generic/bitops/fls64.h> |
| 249 | 104 | #include <asm-generic/bitops/find.h> | |
| 250 | static __inline__ int | 105 | #include <asm-generic/bitops/ext2-non-atomic.h> |
| 251 | find_next_bit(const unsigned long *addr, int size, int offset) | ||
| 252 | { | ||
| 253 | const unsigned long *p = addr + (offset >> 5); | ||
| 254 | unsigned long result = offset & ~31UL; | ||
| 255 | unsigned long tmp; | ||
| 256 | |||
| 257 | if (offset >= size) | ||
| 258 | return size; | ||
| 259 | size -= result; | ||
| 260 | offset &= 31UL; | ||
| 261 | if (offset) { | ||
| 262 | tmp = *p++; | ||
| 263 | tmp &= ~0UL << offset; | ||
| 264 | if (size < 32) | ||
| 265 | goto found_first; | ||
| 266 | if (tmp) | ||
| 267 | goto found_middle; | ||
| 268 | size -= 32; | ||
| 269 | result += 32; | ||
| 270 | } | ||
| 271 | while (size >= 32) { | ||
| 272 | if ((tmp = *p++) != 0) | ||
| 273 | goto found_middle; | ||
| 274 | result += 32; | ||
| 275 | size -= 32; | ||
| 276 | } | ||
| 277 | if (!size) | ||
| 278 | return result; | ||
| 279 | tmp = *p; | ||
| 280 | |||
| 281 | found_first: | ||
| 282 | tmp &= ~0UL >> (32 - size); | ||
| 283 | if (tmp == 0UL) /* Are any bits set? */ | ||
| 284 | return result + size; /* Nope. */ | ||
| 285 | found_middle: | ||
| 286 | return result + __ffs(tmp); | ||
| 287 | } | ||
| 288 | |||
| 289 | /** | ||
| 290 | * find_first_bit - find the first set bit in a memory region | ||
| 291 | * @addr: The address to start the search at | ||
| 292 | * @size: The maximum size to search | ||
| 293 | * | ||
| 294 | * Returns the bit-number of the first set bit, not the number of the byte | ||
| 295 | * containing a bit. | ||
| 296 | */ | ||
| 297 | |||
| 298 | #define find_first_bit(addr, size) \ | ||
| 299 | find_next_bit((addr), (size), 0) | ||
| 300 | |||
| 301 | static __inline__ int | ||
| 302 | find_next_zero_bit(const unsigned long *addr, int size, int offset) | ||
| 303 | { | ||
| 304 | const unsigned long *p = addr + (offset >> 5); | ||
| 305 | unsigned long result = offset & ~31UL; | ||
| 306 | unsigned long tmp; | ||
| 307 | |||
| 308 | if (offset >= size) | ||
| 309 | return size; | ||
| 310 | size -= result; | ||
| 311 | offset &= 31UL; | ||
| 312 | if (offset) { | ||
| 313 | tmp = *p++; | ||
| 314 | tmp |= ~0UL >> (32-offset); | ||
| 315 | if (size < 32) | ||
| 316 | goto found_first; | ||
| 317 | if (~tmp) | ||
| 318 | goto found_middle; | ||
| 319 | size -= 32; | ||
| 320 | result += 32; | ||
| 321 | } | ||
| 322 | while (size & ~31UL) { | ||
| 323 | if (~(tmp = *p++)) | ||
| 324 | goto found_middle; | ||
| 325 | result += 32; | ||
| 326 | size -= 32; | ||
| 327 | } | ||
| 328 | if (!size) | ||
| 329 | return result; | ||
| 330 | tmp = *p; | ||
| 331 | |||
| 332 | found_first: | ||
| 333 | tmp |= ~0UL << size; | ||
| 334 | found_middle: | ||
| 335 | return result + ffz(tmp); | ||
| 336 | } | ||
| 337 | |||
| 338 | #define find_first_zero_bit(addr, size) \ | ||
| 339 | find_next_zero_bit((addr), (size), 0) | ||
| 340 | 106 | ||
| 341 | #ifdef __XTENSA_EL__ | 107 | #ifdef __XTENSA_EL__ |
| 342 | # define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr)) | ||
| 343 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) | 108 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) |
| 344 | # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr)) | ||
| 345 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) | 109 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) |
| 346 | # define ext2_test_bit(nr,addr) test_bit((nr), (addr)) | ||
| 347 | # define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size)) | ||
| 348 | # define ext2_find_next_zero_bit(addr, size, offset) \ | ||
| 349 | find_next_zero_bit((addr), (size), (offset)) | ||
| 350 | #elif defined(__XTENSA_EB__) | 110 | #elif defined(__XTENSA_EB__) |
| 351 | # define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr)) | ||
| 352 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) | 111 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) |
| 353 | # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 0x18, (addr)) | ||
| 354 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) | 112 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) |
| 355 | # define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr)) | ||
| 356 | # define ext2_find_first_zero_bit(addr, size) \ | ||
| 357 | ext2_find_next_zero_bit((addr), (size), 0) | ||
| 358 | |||
| 359 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
| 360 | { | ||
| 361 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
| 362 | unsigned long result = offset & ~31UL; | ||
| 363 | unsigned long tmp; | ||
| 364 | |||
| 365 | if (offset >= size) | ||
| 366 | return size; | ||
| 367 | size -= result; | ||
| 368 | offset &= 31UL; | ||
| 369 | if(offset) { | ||
| 370 | /* We hold the little endian value in tmp, but then the | ||
| 371 | * shift is illegal. So we could keep a big endian value | ||
| 372 | * in tmp, like this: | ||
| 373 | * | ||
| 374 | * tmp = __swab32(*(p++)); | ||
| 375 | * tmp |= ~0UL >> (32-offset); | ||
| 376 | * | ||
| 377 | * but this would decrease performance, so we change the | ||
| 378 | * shift: | ||
| 379 | */ | ||
| 380 | tmp = *(p++); | ||
| 381 | tmp |= __swab32(~0UL >> (32-offset)); | ||
| 382 | if(size < 32) | ||
| 383 | goto found_first; | ||
| 384 | if(~tmp) | ||
| 385 | goto found_middle; | ||
| 386 | size -= 32; | ||
| 387 | result += 32; | ||
| 388 | } | ||
| 389 | while(size & ~31UL) { | ||
| 390 | if(~(tmp = *(p++))) | ||
| 391 | goto found_middle; | ||
| 392 | result += 32; | ||
| 393 | size -= 32; | ||
| 394 | } | ||
| 395 | if(!size) | ||
| 396 | return result; | ||
| 397 | tmp = *p; | ||
| 398 | |||
| 399 | found_first: | ||
| 400 | /* tmp is little endian, so we would have to swab the shift, | ||
| 401 | * see above. But then we have to swab tmp below for ffz, so | ||
| 402 | * we might as well do this here. | ||
| 403 | */ | ||
| 404 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
| 405 | found_middle: | ||
| 406 | return result + ffz(__swab32(tmp)); | ||
| 407 | } | ||
| 408 | |||
| 409 | #else | 113 | #else |
| 410 | # error processor byte order undefined! | 114 | # error processor byte order undefined! |
| 411 | #endif | 115 | #endif |
| 412 | 116 | ||
| 413 | 117 | #include <asm-generic/bitops/hweight.h> | |
| 414 | #define hweight32(x) generic_hweight32(x) | 118 | #include <asm-generic/bitops/sched.h> |
| 415 | #define hweight16(x) generic_hweight16(x) | 119 | #include <asm-generic/bitops/minix.h> |
| 416 | #define hweight8(x) generic_hweight8(x) | ||
| 417 | |||
| 418 | /* | ||
| 419 | * Find the first bit set in a 140-bit bitmap. | ||
| 420 | * The first 100 bits are unlikely to be set. | ||
| 421 | */ | ||
| 422 | |||
| 423 | static inline int sched_find_first_bit(const unsigned long *b) | ||
| 424 | { | ||
| 425 | if (unlikely(b[0])) | ||
| 426 | return __ffs(b[0]); | ||
| 427 | if (unlikely(b[1])) | ||
| 428 | return __ffs(b[1]) + 32; | ||
| 429 | if (unlikely(b[2])) | ||
| 430 | return __ffs(b[2]) + 64; | ||
| 431 | if (b[3]) | ||
| 432 | return __ffs(b[3]) + 96; | ||
| 433 | return __ffs(b[4]) + 128; | ||
| 434 | } | ||
| 435 | |||
| 436 | |||
| 437 | /* Bitmap functions for the minix filesystem. */ | ||
| 438 | |||
| 439 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
| 440 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
| 441 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
| 442 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
| 443 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
| 444 | 120 | ||
| 445 | #endif /* __KERNEL__ */ | 121 | #endif /* __KERNEL__ */ |
| 446 | 122 | ||
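The removed xtensa ext2_find_next_zero_bit() above leans on the fact that byte swapping distributes over bitwise OR: instead of swapping every word it loads, it ORs a pre-swapped mask into the raw word and swaps only once, right before ffz(). A minimal stand-alone sketch of that identity; swab32() here is a local re-implementation for illustration, not the kernel's __swab32():

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t swab32(uint32_t x)          /* local stand-in for __swab32 */
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
                   ((x << 8) & 0x00ff0000u) | (x << 24);
    }

    int main(void)
    {
            uint32_t word = 0x12345678u;            /* raw bitmap word */
            unsigned int offset = 5;
            uint32_t mask = ~0u >> (32 - offset);   /* bits below 'offset' */

            /* swap-early: the "slower" variant sketched in the comment above */
            uint32_t early = swab32(word) | mask;
            /* swap-late: what the removed code actually did */
            uint32_t late = swab32(word | swab32(mask));

            assert(early == late);                  /* the two orderings agree */
            printf("0x%08x\n", late);
            return 0;
    }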
diff --git a/include/asm-xtensa/page.h b/include/asm-xtensa/page.h index 8ded36f255a2..992bac5c1258 100644 --- a/include/asm-xtensa/page.h +++ b/include/asm-xtensa/page.h | |||
| @@ -109,10 +109,7 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); | |||
| 109 | #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) | 109 | #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) |
| 110 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) | 110 | #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) |
| 111 | #define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) | 111 | #define pfn_valid(pfn) ((unsigned long)pfn < max_mapnr) |
| 112 | #ifndef CONFIG_DISCONTIGMEM | 112 | #ifdef CONFIG_DISCONTIGMEM |
| 113 | # define pfn_to_page(pfn) (mem_map + (pfn)) | ||
| 114 | # define page_to_pfn(page) ((unsigned long)((page) - mem_map)) | ||
| 115 | #else | ||
| 116 | # error CONFIG_DISCONTIGMEM not supported | 113 | # error CONFIG_DISCONTIGMEM not supported |
| 117 | #endif | 114 | #endif |
| 118 | 115 | ||
| @@ -130,4 +127,5 @@ void copy_user_page(void *to,void* from,unsigned long vaddr,struct page* page); | |||
| 130 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 127 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
| 131 | 128 | ||
| 132 | #endif /* __KERNEL__ */ | 129 | #endif /* __KERNEL__ */ |
| 130 | #include <asm-generic/memory_model.h> | ||
| 133 | #endif /* _XTENSA_PAGE_H */ | 131 | #endif /* _XTENSA_PAGE_H */ |
diff --git a/include/linux/adb.h b/include/linux/adb.h index e9fdc63483c7..b7305b178279 100644 --- a/include/linux/adb.h +++ b/include/linux/adb.h | |||
| @@ -85,7 +85,7 @@ enum adb_message { | |||
| 85 | ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ | 85 | ADB_MSG_POST_RESET /* Called after resetting the bus (re-do init & register) */ |
| 86 | }; | 86 | }; |
| 87 | extern struct adb_driver *adb_controller; | 87 | extern struct adb_driver *adb_controller; |
| 88 | extern struct notifier_block *adb_client_list; | 88 | extern struct blocking_notifier_head adb_client_list; |
| 89 | 89 | ||
| 90 | int adb_request(struct adb_request *req, void (*done)(struct adb_request *), | 90 | int adb_request(struct adb_request *req, void (*done)(struct adb_request *), |
| 91 | int flags, int nbytes, ...); | 91 | int flags, int nbytes, ...); |
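With adb_client_list changed from a bare notifier_block pointer into a blocking_notifier_head, clients attach through the notifier-chain API instead of splicing themselves into the list by hand. A hedged kernel-side sketch, assuming a client module; my_adb_event and my_adb_client_nb are invented names, not part of this patch:

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <linux/adb.h>

    /* callback run for every ADB bus event; 'code' is an adb_message value */
    static int my_adb_event(struct notifier_block *nb, unsigned long code,
                            void *arg)
    {
            if (code == ADB_MSG_POST_RESET) {
                    /* re-probe devices here; sleeping is allowed, the chain
                     * is a blocking one */
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_adb_client_nb = {
            .notifier_call = my_adb_event,
    };

    static int __init my_adb_client_init(void)
    {
            return blocking_notifier_chain_register(&adb_client_list,
                                                    &my_adb_client_nb);
    }

The gain over the old open-coded list is that registration, unregistration and the call path now share one properly locked implementation.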
diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index 9343c89d843c..0a6bc52ffe88 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h | |||
| @@ -19,18 +19,37 @@ | |||
| 19 | #undef AUTOFS_MIN_PROTO_VERSION | 19 | #undef AUTOFS_MIN_PROTO_VERSION |
| 20 | #undef AUTOFS_MAX_PROTO_VERSION | 20 | #undef AUTOFS_MAX_PROTO_VERSION |
| 21 | 21 | ||
| 22 | #define AUTOFS_PROTO_VERSION 4 | 22 | #define AUTOFS_PROTO_VERSION 5 |
| 23 | #define AUTOFS_MIN_PROTO_VERSION 3 | 23 | #define AUTOFS_MIN_PROTO_VERSION 3 |
| 24 | #define AUTOFS_MAX_PROTO_VERSION 4 | 24 | #define AUTOFS_MAX_PROTO_VERSION 5 |
| 25 | 25 | ||
| 26 | #define AUTOFS_PROTO_SUBVERSION 7 | 26 | #define AUTOFS_PROTO_SUBVERSION 0 |
| 27 | 27 | ||
| 28 | /* Mask for expire behaviour */ | 28 | /* Mask for expire behaviour */ |
| 29 | #define AUTOFS_EXP_IMMEDIATE 1 | 29 | #define AUTOFS_EXP_IMMEDIATE 1 |
| 30 | #define AUTOFS_EXP_LEAVES 2 | 30 | #define AUTOFS_EXP_LEAVES 2 |
| 31 | 31 | ||
| 32 | /* New message type */ | 32 | /* Daemon notification packet types */ |
| 33 | #define autofs_ptype_expire_multi 2 /* Expire entry (umount request) */ | 33 | enum autofs_notify { |
| 34 | NFY_NONE, | ||
| 35 | NFY_MOUNT, | ||
| 36 | NFY_EXPIRE | ||
| 37 | }; | ||
| 38 | |||
| 39 | /* Kernel protocol version 4 packet types */ | ||
| 40 | |||
| 41 | /* Expire entry (umount request) */ | ||
| 42 | #define autofs_ptype_expire_multi 2 | ||
| 43 | |||
| 44 | /* Kernel protocol version 5 packet types */ | ||
| 45 | |||
| 46 | /* Indirect mount missing and expire requests. */ | ||
| 47 | #define autofs_ptype_missing_indirect 3 | ||
| 48 | #define autofs_ptype_expire_indirect 4 | ||
| 49 | |||
| 50 | /* Direct mount missing and expire requests */ | ||
| 51 | #define autofs_ptype_missing_direct 5 | ||
| 52 | #define autofs_ptype_expire_direct 6 | ||
| 34 | 53 | ||
| 35 | /* v4 multi expire (via pipe) */ | 54 | /* v4 multi expire (via pipe) */ |
| 36 | struct autofs_packet_expire_multi { | 55 | struct autofs_packet_expire_multi { |
| @@ -40,14 +59,36 @@ struct autofs_packet_expire_multi { | |||
| 40 | char name[NAME_MAX+1]; | 59 | char name[NAME_MAX+1]; |
| 41 | }; | 60 | }; |
| 42 | 61 | ||
| 62 | /* autofs v5 common packet struct */ | ||
| 63 | struct autofs_v5_packet { | ||
| 64 | struct autofs_packet_hdr hdr; | ||
| 65 | autofs_wqt_t wait_queue_token; | ||
| 66 | __u32 dev; | ||
| 67 | __u64 ino; | ||
| 68 | __u32 uid; | ||
| 69 | __u32 gid; | ||
| 70 | __u32 pid; | ||
| 71 | __u32 tgid; | ||
| 72 | __u32 len; | ||
| 73 | char name[NAME_MAX+1]; | ||
| 74 | }; | ||
| 75 | |||
| 76 | typedef struct autofs_v5_packet autofs_packet_missing_indirect_t; | ||
| 77 | typedef struct autofs_v5_packet autofs_packet_expire_indirect_t; | ||
| 78 | typedef struct autofs_v5_packet autofs_packet_missing_direct_t; | ||
| 79 | typedef struct autofs_v5_packet autofs_packet_expire_direct_t; | ||
| 80 | |||
| 43 | union autofs_packet_union { | 81 | union autofs_packet_union { |
| 44 | struct autofs_packet_hdr hdr; | 82 | struct autofs_packet_hdr hdr; |
| 45 | struct autofs_packet_missing missing; | 83 | struct autofs_packet_missing missing; |
| 46 | struct autofs_packet_expire expire; | 84 | struct autofs_packet_expire expire; |
| 47 | struct autofs_packet_expire_multi expire_multi; | 85 | struct autofs_packet_expire_multi expire_multi; |
| 86 | struct autofs_v5_packet v5_packet; | ||
| 48 | }; | 87 | }; |
| 49 | 88 | ||
| 50 | #define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) | 89 | #define AUTOFS_IOC_EXPIRE_MULTI _IOW(0x93,0x66,int) |
| 90 | #define AUTOFS_IOC_EXPIRE_INDIRECT AUTOFS_IOC_EXPIRE_MULTI | ||
| 91 | #define AUTOFS_IOC_EXPIRE_DIRECT AUTOFS_IOC_EXPIRE_MULTI | ||
| 51 | #define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) | 92 | #define AUTOFS_IOC_PROTOSUBVER _IOR(0x93,0x67,int) |
| 52 | #define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) | 93 | #define AUTOFS_IOC_ASKREGHOST _IOR(0x93,0x68,int) |
| 53 | #define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) | 94 | #define AUTOFS_IOC_TOGGLEREGHOST _IOR(0x93,0x69,int) |
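The new autofs_v5_packet carries the uid, gid and pid of the process that triggered the mount, so the daemon no longer has to infer them. A hedged userspace sketch of reading one packet from the kernel pipe and branching on hdr.type; read_one_packet and pipe_fd are illustrative names:

    #include <stdio.h>
    #include <unistd.h>
    #include <linux/auto_fs4.h>

    static int read_one_packet(int pipe_fd)
    {
            union autofs_packet_union pkt;
            ssize_t n = read(pipe_fd, &pkt, sizeof(pkt));

            if (n < (ssize_t)sizeof(pkt.hdr))
                    return -1;

            switch (pkt.hdr.type) {
            case autofs_ptype_missing_indirect:
            case autofs_ptype_missing_direct:
                    /* v5: credentials of the triggering process are in-band */
                    printf("mount %s for uid %u\n",
                           pkt.v5_packet.name, (unsigned)pkt.v5_packet.uid);
                    break;
            case autofs_ptype_expire_indirect:
            case autofs_ptype_expire_direct:
                    printf("expire %s\n", pkt.v5_packet.name);
                    break;
            case autofs_ptype_expire_multi:         /* older v4 request */
                    printf("v4 expire %s\n", pkt.expire_multi.name);
                    break;
            }
            return 0;
    }

Replies still go back through the AUTOFS_IOC_* ioctls, with the wait_queue_token from the packet identifying the request.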
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index f17525a963d1..5d1eabcde5d5 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
| @@ -3,88 +3,11 @@ | |||
| 3 | #include <asm/types.h> | 3 | #include <asm/types.h> |
| 4 | 4 | ||
| 5 | /* | 5 | /* |
| 6 | * ffs: find first bit set. This is defined the same way as | ||
| 7 | * the libc and compiler builtin ffs routines, therefore | ||
| 8 | * differs in spirit from the above ffz (man ffs). | ||
| 9 | */ | ||
| 10 | |||
| 11 | static inline int generic_ffs(int x) | ||
| 12 | { | ||
| 13 | int r = 1; | ||
| 14 | |||
| 15 | if (!x) | ||
| 16 | return 0; | ||
| 17 | if (!(x & 0xffff)) { | ||
| 18 | x >>= 16; | ||
| 19 | r += 16; | ||
| 20 | } | ||
| 21 | if (!(x & 0xff)) { | ||
| 22 | x >>= 8; | ||
| 23 | r += 8; | ||
| 24 | } | ||
| 25 | if (!(x & 0xf)) { | ||
| 26 | x >>= 4; | ||
| 27 | r += 4; | ||
| 28 | } | ||
| 29 | if (!(x & 3)) { | ||
| 30 | x >>= 2; | ||
| 31 | r += 2; | ||
| 32 | } | ||
| 33 | if (!(x & 1)) { | ||
| 34 | x >>= 1; | ||
| 35 | r += 1; | ||
| 36 | } | ||
| 37 | return r; | ||
| 38 | } | ||
| 39 | |||
| 40 | /* | ||
| 41 | * fls: find last bit set. | ||
| 42 | */ | ||
| 43 | |||
| 44 | static __inline__ int generic_fls(int x) | ||
| 45 | { | ||
| 46 | int r = 32; | ||
| 47 | |||
| 48 | if (!x) | ||
| 49 | return 0; | ||
| 50 | if (!(x & 0xffff0000u)) { | ||
| 51 | x <<= 16; | ||
| 52 | r -= 16; | ||
| 53 | } | ||
| 54 | if (!(x & 0xff000000u)) { | ||
| 55 | x <<= 8; | ||
| 56 | r -= 8; | ||
| 57 | } | ||
| 58 | if (!(x & 0xf0000000u)) { | ||
| 59 | x <<= 4; | ||
| 60 | r -= 4; | ||
| 61 | } | ||
| 62 | if (!(x & 0xc0000000u)) { | ||
| 63 | x <<= 2; | ||
| 64 | r -= 2; | ||
| 65 | } | ||
| 66 | if (!(x & 0x80000000u)) { | ||
| 67 | x <<= 1; | ||
| 68 | r -= 1; | ||
| 69 | } | ||
| 70 | return r; | ||
| 71 | } | ||
| 72 | |||
| 73 | /* | ||
| 74 | * Include this here because some architectures need generic_ffs/fls in | 6 | * Include this here because some architectures need generic_ffs/fls in |
| 75 | * scope | 7 | * scope |
| 76 | */ | 8 | */ |
| 77 | #include <asm/bitops.h> | 9 | #include <asm/bitops.h> |
| 78 | 10 | ||
| 79 | |||
| 80 | static inline int generic_fls64(__u64 x) | ||
| 81 | { | ||
| 82 | __u32 h = x >> 32; | ||
| 83 | if (h) | ||
| 84 | return fls(h) + 32; | ||
| 85 | return fls(x); | ||
| 86 | } | ||
| 87 | |||
| 88 | static __inline__ int get_bitmask_order(unsigned int count) | 11 | static __inline__ int get_bitmask_order(unsigned int count) |
| 89 | { | 12 | { |
| 90 | int order; | 13 | int order; |
| @@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count) | |||
| 103 | return order; | 26 | return order; |
| 104 | } | 27 | } |
| 105 | 28 | ||
| 106 | /* | ||
| 107 | * hweightN: returns the hamming weight (i.e. the number | ||
| 108 | * of bits set) of a N-bit word | ||
| 109 | */ | ||
| 110 | |||
| 111 | static inline unsigned int generic_hweight32(unsigned int w) | ||
| 112 | { | ||
| 113 | unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); | ||
| 114 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | ||
| 115 | res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); | ||
| 116 | res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); | ||
| 117 | return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline unsigned int generic_hweight16(unsigned int w) | ||
| 121 | { | ||
| 122 | unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); | ||
| 123 | res = (res & 0x3333) + ((res >> 2) & 0x3333); | ||
| 124 | res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); | ||
| 125 | return (res & 0x00FF) + ((res >> 8) & 0x00FF); | ||
| 126 | } | ||
| 127 | |||
| 128 | static inline unsigned int generic_hweight8(unsigned int w) | ||
| 129 | { | ||
| 130 | unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); | ||
| 131 | res = (res & 0x33) + ((res >> 2) & 0x33); | ||
| 132 | return (res & 0x0F) + ((res >> 4) & 0x0F); | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline unsigned long generic_hweight64(__u64 w) | ||
| 136 | { | ||
| 137 | #if BITS_PER_LONG < 64 | ||
| 138 | return generic_hweight32((unsigned int)(w >> 32)) + | ||
| 139 | generic_hweight32((unsigned int)w); | ||
| 140 | #else | ||
| 141 | u64 res; | ||
| 142 | res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); | ||
| 143 | res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); | ||
| 144 | res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); | ||
| 145 | res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); | ||
| 146 | res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); | ||
| 147 | return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); | ||
| 148 | #endif | ||
| 149 | } | ||
| 150 | |||
| 151 | static inline unsigned long hweight_long(unsigned long w) | 29 | static inline unsigned long hweight_long(unsigned long w) |
| 152 | { | 30 | { |
| 153 | return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w); | 31 | return sizeof(w) == 4 ? hweight32(w) : hweight64(w); |
| 154 | } | 32 | } |
| 155 | 33 | ||
| 156 | /* | 34 | /* |
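hweight_long() now forwards to the hweight32()/hweight64() that every architecture either takes from asm-generic/bitops/hweight.h or overrides with a hardware instruction. For reference, a stand-alone sketch of the classic parallel bit count that the generic versions are built around (written fresh here, not copied from the kernel sources):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned int popcount32(uint32_t w)
    {
            w = w - ((w >> 1) & 0x55555555u);                 /* 2-bit sums  */
            w = (w & 0x33333333u) + ((w >> 2) & 0x33333333u); /* 4-bit sums  */
            w = (w + (w >> 4)) & 0x0f0f0f0fu;                 /* byte sums   */
            return (w * 0x01010101u) >> 24;                   /* add bytes   */
    }

    int main(void)
    {
            printf("%u\n", popcount32(0xdeadbeefu));          /* prints 24 */
            return 0;
    }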
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h index 7155452fb4a8..de3eb8d8ae26 100644 --- a/include/linux/bootmem.h +++ b/include/linux/bootmem.h | |||
| @@ -38,6 +38,7 @@ typedef struct bootmem_data { | |||
| 38 | unsigned long last_pos; | 38 | unsigned long last_pos; |
| 39 | unsigned long last_success; /* Previous allocation point. To speed | 39 | unsigned long last_success; /* Previous allocation point. To speed |
| 40 | * up searching */ | 40 | * up searching */ |
| 41 | struct list_head list; | ||
| 41 | } bootmem_data_t; | 42 | } bootmem_data_t; |
| 42 | 43 | ||
| 43 | extern unsigned long __init bootmem_bootmap_pages (unsigned long); | 44 | extern unsigned long __init bootmem_bootmap_pages (unsigned long); |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9f159baf153f..fb7e9b7ccbe3 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
| @@ -46,25 +46,28 @@ struct address_space; | |||
| 46 | typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); | 46 | typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); |
| 47 | 47 | ||
| 48 | /* | 48 | /* |
| 49 | * Keep related fields in common cachelines. The most commonly accessed | 49 | * Historically, a buffer_head was used to map a single block |
| 50 | * field (b_state) goes at the start so the compiler does not generate | 50 | * within a page, and of course as the unit of I/O through the |
| 51 | * indexed addressing for it. | 51 | * filesystem and block layers. Nowadays the basic I/O unit |
| 52 | * is the bio, and buffer_heads are used for extracting block | ||
| 53 | * mappings (via a get_block_t call), for tracking state within | ||
| 54 | * a page (via a page_mapping) and for wrapping bio submission | ||
| 55 | * for backward compatibility reasons (e.g. submit_bh). | ||
| 52 | */ | 56 | */ |
| 53 | struct buffer_head { | 57 | struct buffer_head { |
| 54 | /* First cache line: */ | ||
| 55 | unsigned long b_state; /* buffer state bitmap (see above) */ | 58 | unsigned long b_state; /* buffer state bitmap (see above) */ |
| 56 | struct buffer_head *b_this_page;/* circular list of page's buffers */ | 59 | struct buffer_head *b_this_page;/* circular list of page's buffers */ |
| 57 | struct page *b_page; /* the page this bh is mapped to */ | 60 | struct page *b_page; /* the page this bh is mapped to */ |
| 58 | atomic_t b_count; /* users using this block */ | ||
| 59 | u32 b_size; /* block size */ | ||
| 60 | 61 | ||
| 61 | sector_t b_blocknr; /* block number */ | 62 | sector_t b_blocknr; /* start block number */ |
| 62 | char *b_data; /* pointer to data block */ | 63 | size_t b_size; /* size of mapping */ |
| 64 | char *b_data; /* pointer to data within the page */ | ||
| 63 | 65 | ||
| 64 | struct block_device *b_bdev; | 66 | struct block_device *b_bdev; |
| 65 | bh_end_io_t *b_end_io; /* I/O completion */ | 67 | bh_end_io_t *b_end_io; /* I/O completion */ |
| 66 | void *b_private; /* reserved for b_end_io */ | 68 | void *b_private; /* reserved for b_end_io */ |
| 67 | struct list_head b_assoc_buffers; /* associated with another mapping */ | 69 | struct list_head b_assoc_buffers; /* associated with another mapping */ |
| 70 | atomic_t b_count; /* users using this buffer_head */ | ||
| 68 | }; | 71 | }; |
| 69 | 72 | ||
| 70 | /* | 73 | /* |
| @@ -189,8 +192,8 @@ extern int buffer_heads_over_limit; | |||
| 189 | * address_spaces. | 192 | * address_spaces. |
| 190 | */ | 193 | */ |
| 191 | int try_to_release_page(struct page * page, gfp_t gfp_mask); | 194 | int try_to_release_page(struct page * page, gfp_t gfp_mask); |
| 192 | int block_invalidatepage(struct page *page, unsigned long offset); | 195 | void block_invalidatepage(struct page *page, unsigned long offset); |
| 193 | int do_invalidatepage(struct page *page, unsigned long offset); | 196 | void do_invalidatepage(struct page *page, unsigned long offset); |
| 194 | int block_write_full_page(struct page *page, get_block_t *get_block, | 197 | int block_write_full_page(struct page *page, get_block_t *get_block, |
| 195 | struct writeback_control *wbc); | 198 | struct writeback_control *wbc); |
| 196 | int block_read_full_page(struct page*, get_block_t*); | 199 | int block_read_full_page(struct page*, get_block_t*); |
| @@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*, | |||
| 200 | int generic_cont_expand(struct inode *inode, loff_t size); | 203 | int generic_cont_expand(struct inode *inode, loff_t size); |
| 201 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | 204 | int generic_cont_expand_simple(struct inode *inode, loff_t size); |
| 202 | int block_commit_write(struct page *page, unsigned from, unsigned to); | 205 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
| 203 | int block_sync_page(struct page *); | 206 | void block_sync_page(struct page *); |
| 204 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 207 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
| 205 | int generic_commit_write(struct file *, struct page *, unsigned, unsigned); | 208 | int generic_commit_write(struct file *, struct page *, unsigned, unsigned); |
| 206 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 209 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
| @@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) | |||
| 277 | set_buffer_mapped(bh); | 280 | set_buffer_mapped(bh); |
| 278 | bh->b_bdev = sb->s_bdev; | 281 | bh->b_bdev = sb->s_bdev; |
| 279 | bh->b_blocknr = block; | 282 | bh->b_blocknr = block; |
| 283 | bh->b_size = sb->s_blocksize; | ||
| 280 | } | 284 | } |
| 281 | 285 | ||
| 282 | /* | 286 | /* |
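Because map_bh() now also initialises b_size (to one filesystem block), an ordinary get_block_t can report a longer contiguous extent simply by enlarging bh_result->b_size afterwards, which is what lets the separate get_blocks_t go away. A hedged sketch of that pattern; myfs_get_block and myfs_find_extent are invented, and the extent lookup is assumed to live elsewhere in the filesystem:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* hypothetical extent lookup: returns how many blocks are contiguous,
     * starting at *phys, or 0 for a hole */
    extern unsigned long myfs_find_extent(struct inode *inode, sector_t iblock,
                                          unsigned long max_blocks,
                                          sector_t *phys, int create);

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create)
    {
            unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
            unsigned long nblocks;
            sector_t phys;

            nblocks = myfs_find_extent(inode, iblock, max_blocks, &phys, create);
            if (!nblocks)
                    return 0;                       /* hole: leave bh unmapped */

            map_bh(bh_result, inode->i_sb, phys);   /* sets b_size to one block */
            bh_result->b_size = nblocks << inode->i_blkbits;
            return 0;
    }

Callers set b_size to the amount they want before the call, and the routine must not map more than that, which is why max_blocks caps the lookup.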
diff --git a/include/linux/compat.h b/include/linux/compat.h index c9ab2a26348c..6d3a654be1ae 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
| @@ -45,6 +45,32 @@ struct compat_tms { | |||
| 45 | compat_clock_t tms_cstime; | 45 | compat_clock_t tms_cstime; |
| 46 | }; | 46 | }; |
| 47 | 47 | ||
| 48 | struct compat_timex { | ||
| 49 | compat_uint_t modes; | ||
| 50 | compat_long_t offset; | ||
| 51 | compat_long_t freq; | ||
| 52 | compat_long_t maxerror; | ||
| 53 | compat_long_t esterror; | ||
| 54 | compat_int_t status; | ||
| 55 | compat_long_t constant; | ||
| 56 | compat_long_t precision; | ||
| 57 | compat_long_t tolerance; | ||
| 58 | struct compat_timeval time; | ||
| 59 | compat_long_t tick; | ||
| 60 | compat_long_t ppsfreq; | ||
| 61 | compat_long_t jitter; | ||
| 62 | compat_int_t shift; | ||
| 63 | compat_long_t stabil; | ||
| 64 | compat_long_t jitcnt; | ||
| 65 | compat_long_t calcnt; | ||
| 66 | compat_long_t errcnt; | ||
| 67 | compat_long_t stbcnt; | ||
| 68 | |||
| 69 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
| 70 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
| 71 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
| 72 | }; | ||
| 73 | |||
| 48 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) | 74 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) |
| 49 | 75 | ||
| 50 | typedef struct { | 76 | typedef struct { |
| @@ -121,6 +147,24 @@ typedef struct compat_sigevent { | |||
| 121 | } _sigev_un; | 147 | } _sigev_un; |
| 122 | } compat_sigevent_t; | 148 | } compat_sigevent_t; |
| 123 | 149 | ||
| 150 | struct compat_robust_list { | ||
| 151 | compat_uptr_t next; | ||
| 152 | }; | ||
| 153 | |||
| 154 | struct compat_robust_list_head { | ||
| 155 | struct compat_robust_list list; | ||
| 156 | compat_long_t futex_offset; | ||
| 157 | compat_uptr_t list_op_pending; | ||
| 158 | }; | ||
| 159 | |||
| 160 | extern void compat_exit_robust_list(struct task_struct *curr); | ||
| 161 | |||
| 162 | asmlinkage long | ||
| 163 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, | ||
| 164 | compat_size_t len); | ||
| 165 | asmlinkage long | ||
| 166 | compat_sys_get_robust_list(int pid, compat_uptr_t *head_ptr, | ||
| 167 | compat_size_t __user *len_ptr); | ||
| 124 | 168 | ||
| 125 | long compat_sys_semctl(int first, int second, int third, void __user *uptr); | 169 | long compat_sys_semctl(int first, int second, int third, void __user *uptr); |
| 126 | long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); | 170 | long compat_sys_msgsnd(int first, int second, int third, void __user *uptr); |
| @@ -181,5 +225,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, | |||
| 181 | return lhs->tv_nsec - rhs->tv_nsec; | 225 | return lhs->tv_nsec - rhs->tv_nsec; |
| 182 | } | 226 | } |
| 183 | 227 | ||
| 228 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | ||
| 229 | |||
| 184 | #endif /* CONFIG_COMPAT */ | 230 | #endif /* CONFIG_COMPAT */ |
| 185 | #endif /* _LINUX_COMPAT_H */ | 231 | #endif /* _LINUX_COMPAT_H */ |
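The compat_robust_list structures hold 32-bit user pointers as compat_uptr_t, so kernel code has to widen them with compat_ptr() before it can follow the chain. A hedged sketch, loosely modelled on what a compat exit path needs to do; fetch_robust_entry32 is an invented name:

    #include <linux/compat.h>
    #include <linux/futex.h>
    #include <linux/errno.h>
    #include <asm/uaccess.h>

    static int fetch_robust_entry32(struct compat_robust_list_head __user *head,
                                    struct robust_list __user **entry)
    {
            compat_uptr_t uentry;

            /* the 32-bit task stored a 32-bit pointer; read it as such */
            if (get_user(uentry, &head->list.next))
                    return -EFAULT;

            *entry = compat_ptr(uentry);            /* widen to a native pointer */
            return 0;
    }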
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h index efb518f16bb3..89ab677cb993 100644 --- a/include/linux/compat_ioctl.h +++ b/include/linux/compat_ioctl.h | |||
| @@ -140,6 +140,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS_32) | |||
| 140 | COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) | 140 | COMPATIBLE_IOCTL(DM_TABLE_STATUS_32) |
| 141 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) | 141 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS_32) |
| 142 | COMPATIBLE_IOCTL(DM_TARGET_MSG_32) | 142 | COMPATIBLE_IOCTL(DM_TARGET_MSG_32) |
| 143 | COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY_32) | ||
| 143 | COMPATIBLE_IOCTL(DM_VERSION) | 144 | COMPATIBLE_IOCTL(DM_VERSION) |
| 144 | COMPATIBLE_IOCTL(DM_REMOVE_ALL) | 145 | COMPATIBLE_IOCTL(DM_REMOVE_ALL) |
| 145 | COMPATIBLE_IOCTL(DM_LIST_DEVICES) | 146 | COMPATIBLE_IOCTL(DM_LIST_DEVICES) |
| @@ -155,6 +156,7 @@ COMPATIBLE_IOCTL(DM_TABLE_DEPS) | |||
| 155 | COMPATIBLE_IOCTL(DM_TABLE_STATUS) | 156 | COMPATIBLE_IOCTL(DM_TABLE_STATUS) |
| 156 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS) | 157 | COMPATIBLE_IOCTL(DM_LIST_VERSIONS) |
| 157 | COMPATIBLE_IOCTL(DM_TARGET_MSG) | 158 | COMPATIBLE_IOCTL(DM_TARGET_MSG) |
| 159 | COMPATIBLE_IOCTL(DM_DEV_SET_GEOMETRY) | ||
| 158 | /* Big K */ | 160 | /* Big K */ |
| 159 | COMPATIBLE_IOCTL(PIO_FONT) | 161 | COMPATIBLE_IOCTL(PIO_FONT) |
| 160 | COMPATIBLE_IOCTL(GIO_FONT) | 162 | COMPATIBLE_IOCTL(GIO_FONT) |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 51e0e95a421a..aee10b2ea4c6 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
| @@ -97,6 +97,7 @@ struct io_restrictions { | |||
| 97 | unsigned short hardsect_size; | 97 | unsigned short hardsect_size; |
| 98 | unsigned int max_segment_size; | 98 | unsigned int max_segment_size; |
| 99 | unsigned long seg_boundary_mask; | 99 | unsigned long seg_boundary_mask; |
| 100 | unsigned char no_cluster; /* inverted so that 0 is default */ | ||
| 100 | }; | 101 | }; |
| 101 | 102 | ||
| 102 | struct dm_target { | 103 | struct dm_target { |
diff --git a/include/linux/dm-ioctl.h b/include/linux/dm-ioctl.h index fa75ba0d635e..c67c6786612a 100644 --- a/include/linux/dm-ioctl.h +++ b/include/linux/dm-ioctl.h | |||
| @@ -80,6 +80,16 @@ | |||
| 80 | * | 80 | * |
| 81 | * DM_TARGET_MSG: | 81 | * DM_TARGET_MSG: |
| 82 | * Pass a message string to the target at a specific offset of a device. | 82 | * Pass a message string to the target at a specific offset of a device. |
| 83 | * | ||
| 84 | * DM_DEV_SET_GEOMETRY: | ||
| 85 | * Set the geometry of a device by passing in a string in this format: | ||
| 86 | * | ||
| 87 | * "cylinders heads sectors_per_track start_sector" | ||
| 88 | * | ||
| 89 | * Beware that CHS geometry is nearly obsolete and only provided | ||
| 90 | * for compatibility with dm devices that can be booted by a PC | ||
| 91 | * BIOS. See struct hd_geometry for range limits. Also note that | ||
| 92 | * the geometry is erased if the device size changes. | ||
| 83 | */ | 93 | */ |
| 84 | 94 | ||
| 85 | /* | 95 | /* |
| @@ -218,6 +228,7 @@ enum { | |||
| 218 | /* Added later */ | 228 | /* Added later */ |
| 219 | DM_LIST_VERSIONS_CMD, | 229 | DM_LIST_VERSIONS_CMD, |
| 220 | DM_TARGET_MSG_CMD, | 230 | DM_TARGET_MSG_CMD, |
| 231 | DM_DEV_SET_GEOMETRY_CMD | ||
| 221 | }; | 232 | }; |
| 222 | 233 | ||
| 223 | /* | 234 | /* |
| @@ -247,6 +258,7 @@ typedef char ioctl_struct[308]; | |||
| 247 | #define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct) | 258 | #define DM_TABLE_STATUS_32 _IOWR(DM_IOCTL, DM_TABLE_STATUS_CMD, ioctl_struct) |
| 248 | #define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct) | 259 | #define DM_LIST_VERSIONS_32 _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, ioctl_struct) |
| 249 | #define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct) | 260 | #define DM_TARGET_MSG_32 _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, ioctl_struct) |
| 261 | #define DM_DEV_SET_GEOMETRY_32 _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, ioctl_struct) | ||
| 250 | #endif | 262 | #endif |
| 251 | 263 | ||
| 252 | #define DM_IOCTL 0xfd | 264 | #define DM_IOCTL 0xfd |
| @@ -270,11 +282,12 @@ typedef char ioctl_struct[308]; | |||
| 270 | #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) | 282 | #define DM_LIST_VERSIONS _IOWR(DM_IOCTL, DM_LIST_VERSIONS_CMD, struct dm_ioctl) |
| 271 | 283 | ||
| 272 | #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) | 284 | #define DM_TARGET_MSG _IOWR(DM_IOCTL, DM_TARGET_MSG_CMD, struct dm_ioctl) |
| 285 | #define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl) | ||
| 273 | 286 | ||
| 274 | #define DM_VERSION_MAJOR 4 | 287 | #define DM_VERSION_MAJOR 4 |
| 275 | #define DM_VERSION_MINOR 5 | 288 | #define DM_VERSION_MINOR 6 |
| 276 | #define DM_VERSION_PATCHLEVEL 0 | 289 | #define DM_VERSION_PATCHLEVEL 0 |
| 277 | #define DM_VERSION_EXTRA "-ioctl (2005-10-04)" | 290 | #define DM_VERSION_EXTRA "-ioctl (2006-02-17)" |
| 278 | 291 | ||
| 279 | /* Status bits */ | 292 | /* Status bits */ |
| 280 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ | 293 | #define DM_READONLY_FLAG (1 << 0) /* In/Out */ |
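DM_DEV_SET_GEOMETRY takes its arguments as the text string documented above. A hedged userspace sketch of just the formatting step; that the string then travels in the variable-length data area following struct dm_ioctl, like other parameter-carrying dm ioctls, is an assumption here, and real programs would normally go through libdevmapper rather than raw ioctls:

    #include <stdio.h>

    int main(void)
    {
            /* see struct hd_geometry for the permissible ranges */
            unsigned long cylinders = 1024, heads = 255, sectors = 63, start = 0;
            char geometry[64];

            /* "cylinders heads sectors_per_track start_sector" */
            snprintf(geometry, sizeof(geometry), "%lu %lu %lu %lu",
                     cylinders, heads, sectors, start);
            printf("%s\n", geometry);
            return 0;
    }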
diff --git a/include/linux/efi.h b/include/linux/efi.h index c7c5dd316182..e203613d3aec 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -240,19 +240,21 @@ struct efi_memory_map { | |||
| 240 | unsigned long desc_size; | 240 | unsigned long desc_size; |
| 241 | }; | 241 | }; |
| 242 | 242 | ||
| 243 | #define EFI_INVALID_TABLE_ADDR (~0UL) | ||
| 244 | |||
| 243 | /* | 245 | /* |
| 244 | * All runtime access to EFI goes through this structure: | 246 | * All runtime access to EFI goes through this structure: |
| 245 | */ | 247 | */ |
| 246 | extern struct efi { | 248 | extern struct efi { |
| 247 | efi_system_table_t *systab; /* EFI system table */ | 249 | efi_system_table_t *systab; /* EFI system table */ |
| 248 | void *mps; /* MPS table */ | 250 | unsigned long mps; /* MPS table */ |
| 249 | void *acpi; /* ACPI table (IA64 ext 0.71) */ | 251 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ |
| 250 | void *acpi20; /* ACPI table (ACPI 2.0) */ | 252 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ |
| 251 | void *smbios; /* SM BIOS table */ | 253 | unsigned long smbios; /* SM BIOS table */ |
| 252 | void *sal_systab; /* SAL system table */ | 254 | unsigned long sal_systab; /* SAL system table */ |
| 253 | void *boot_info; /* boot info table */ | 255 | unsigned long boot_info; /* boot info table */ |
| 254 | void *hcdp; /* HCDP table */ | 256 | unsigned long hcdp; /* HCDP table */ |
| 255 | void *uga; /* UGA table */ | 257 | unsigned long uga; /* UGA table */ |
| 256 | efi_get_time_t *get_time; | 258 | efi_get_time_t *get_time; |
| 257 | efi_set_time_t *set_time; | 259 | efi_set_time_t *set_time; |
| 258 | efi_get_wakeup_time_t *get_wakeup_time; | 260 | efi_get_wakeup_time_t *get_wakeup_time; |
| @@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos | |||
| 292 | extern u64 efi_get_iobase (void); | 294 | extern u64 efi_get_iobase (void); |
| 293 | extern u32 efi_mem_type (unsigned long phys_addr); | 295 | extern u32 efi_mem_type (unsigned long phys_addr); |
| 294 | extern u64 efi_mem_attributes (unsigned long phys_addr); | 296 | extern u64 efi_mem_attributes (unsigned long phys_addr); |
| 297 | extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, | ||
| 298 | u64 attr); | ||
| 295 | extern int __init efi_uart_console_only (void); | 299 | extern int __init efi_uart_console_only (void); |
| 296 | extern void efi_initialize_iomem_resources(struct resource *code_resource, | 300 | extern void efi_initialize_iomem_resources(struct resource *code_resource, |
| 297 | struct resource *data_resource); | 301 | struct resource *data_resource); |
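Since the config-table fields are now plain physical addresses, an absent table is flagged with EFI_INVALID_TABLE_ADDR instead of a NULL pointer. A hedged sketch of the check callers make before touching one of them; show_acpi20_table is an invented name:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/efi.h>

    static void __init show_acpi20_table(void)
    {
            if (efi.acpi20 == EFI_INVALID_TABLE_ADDR) {
                    printk(KERN_INFO "EFI: no ACPI 2.0 table\n");
                    return;
            }
            /* the value is a physical address; map or convert it before use */
            printk(KERN_INFO "EFI: ACPI 2.0 table at 0x%lx\n", efi.acpi20);
    }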
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index e7239f2f97a1..8bb4f842cded 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
| @@ -36,7 +36,8 @@ struct statfs; | |||
| 36 | * Define EXT3_RESERVATION to reserve data blocks for expanding files | 36 | * Define EXT3_RESERVATION to reserve data blocks for expanding files |
| 37 | */ | 37 | */ |
| 38 | #define EXT3_DEFAULT_RESERVE_BLOCKS 8 | 38 | #define EXT3_DEFAULT_RESERVE_BLOCKS 8 |
| 39 | #define EXT3_MAX_RESERVE_BLOCKS 1024 | 39 | /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */ |
| 40 | #define EXT3_MAX_RESERVE_BLOCKS 1027 | ||
| 40 | #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 | 41 | #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 |
| 41 | /* | 42 | /* |
| 42 | * Always enable hashed directories | 43 | * Always enable hashed directories |
| @@ -732,6 +733,8 @@ struct dir_private_info { | |||
| 732 | extern int ext3_bg_has_super(struct super_block *sb, int group); | 733 | extern int ext3_bg_has_super(struct super_block *sb, int group); |
| 733 | extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); | 734 | extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); |
| 734 | extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); | 735 | extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); |
| 736 | extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long, | ||
| 737 | unsigned long *, int *); | ||
| 735 | extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, | 738 | extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, |
| 736 | unsigned long); | 739 | unsigned long); |
| 737 | extern void ext3_free_blocks_sb (handle_t *, struct super_block *, | 740 | extern void ext3_free_blocks_sb (handle_t *, struct super_block *, |
| @@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned); | |||
| 775 | int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); | 778 | int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); |
| 776 | struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); | 779 | struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); |
| 777 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); | 780 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); |
| 778 | int ext3_get_block_handle(handle_t *handle, struct inode *inode, | 781 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, |
| 779 | sector_t iblock, struct buffer_head *bh_result, int create, | 782 | sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, |
| 780 | int extend_disksize); | 783 | int create, int extend_disksize); |
| 781 | 784 | ||
| 782 | extern void ext3_read_inode (struct inode *); | 785 | extern void ext3_read_inode (struct inode *); |
| 783 | extern int ext3_write_inode (struct inode *, int); | 786 | extern int ext3_write_inode (struct inode *, int); |
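ext3_get_block_handle() became ext3_get_blocks_handle() because callers may now ask for a whole range: the count requested comes in via maxblocks and the return value is the number of blocks actually mapped. A hedged sketch of the wrapper pattern this implies, loosely modelled on how a get_block_t would drive it (not copied from fs/ext3):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>
    #include <linux/ext3_fs.h>

    static int example_ext3_get_block(struct inode *inode, sector_t iblock,
                                      struct buffer_head *bh_result, int create)
    {
            unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;
            int ret;

            /* NULL handle: read-style lookup with no transaction open */
            ret = ext3_get_blocks_handle(NULL, inode, iblock, max_blocks,
                                         bh_result, create, 0);
            if (ret > 0) {
                    /* ret blocks were mapped contiguously */
                    bh_result->b_size = ret << inode->i_blkbits;
                    ret = 0;
            }
            return ret;
    }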
diff --git a/include/linux/fs.h b/include/linux/fs.h index 5adf32b90f36..680d913350e7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -252,9 +252,6 @@ extern void __init files_init(unsigned long); | |||
| 252 | struct buffer_head; | 252 | struct buffer_head; |
| 253 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | 253 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, |
| 254 | struct buffer_head *bh_result, int create); | 254 | struct buffer_head *bh_result, int create); |
| 255 | typedef int (get_blocks_t)(struct inode *inode, sector_t iblock, | ||
| 256 | unsigned long max_blocks, | ||
| 257 | struct buffer_head *bh_result, int create); | ||
| 258 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | 255 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, |
| 259 | ssize_t bytes, void *private); | 256 | ssize_t bytes, void *private); |
| 260 | 257 | ||
| @@ -350,7 +347,7 @@ struct writeback_control; | |||
| 350 | struct address_space_operations { | 347 | struct address_space_operations { |
| 351 | int (*writepage)(struct page *page, struct writeback_control *wbc); | 348 | int (*writepage)(struct page *page, struct writeback_control *wbc); |
| 352 | int (*readpage)(struct file *, struct page *); | 349 | int (*readpage)(struct file *, struct page *); |
| 353 | int (*sync_page)(struct page *); | 350 | void (*sync_page)(struct page *); |
| 354 | 351 | ||
| 355 | /* Write back some dirty pages from this mapping. */ | 352 | /* Write back some dirty pages from this mapping. */ |
| 356 | int (*writepages)(struct address_space *, struct writeback_control *); | 353 | int (*writepages)(struct address_space *, struct writeback_control *); |
| @@ -369,7 +366,7 @@ struct address_space_operations { | |||
| 369 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); | 366 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); |
| 370 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ | 367 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ |
| 371 | sector_t (*bmap)(struct address_space *, sector_t); | 368 | sector_t (*bmap)(struct address_space *, sector_t); |
| 372 | int (*invalidatepage) (struct page *, unsigned long); | 369 | void (*invalidatepage) (struct page *, unsigned long); |
| 373 | int (*releasepage) (struct page *, gfp_t); | 370 | int (*releasepage) (struct page *, gfp_t); |
| 374 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, | 371 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, |
| 375 | loff_t offset, unsigned long nr_segs); | 372 | loff_t offset, unsigned long nr_segs); |
| @@ -413,6 +410,9 @@ struct block_device { | |||
| 413 | struct list_head bd_inodes; | 410 | struct list_head bd_inodes; |
| 414 | void * bd_holder; | 411 | void * bd_holder; |
| 415 | int bd_holders; | 412 | int bd_holders; |
| 413 | #ifdef CONFIG_SYSFS | ||
| 414 | struct list_head bd_holder_list; | ||
| 415 | #endif | ||
| 416 | struct block_device * bd_contains; | 416 | struct block_device * bd_contains; |
| 417 | unsigned bd_block_size; | 417 | unsigned bd_block_size; |
| 418 | struct hd_struct * bd_part; | 418 | struct hd_struct * bd_part; |
| @@ -490,7 +490,7 @@ struct inode { | |||
| 490 | unsigned int i_blkbits; | 490 | unsigned int i_blkbits; |
| 491 | unsigned long i_blksize; | 491 | unsigned long i_blksize; |
| 492 | unsigned long i_version; | 492 | unsigned long i_version; |
| 493 | unsigned long i_blocks; | 493 | blkcnt_t i_blocks; |
| 494 | unsigned short i_bytes; | 494 | unsigned short i_bytes; |
| 495 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 495 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
| 496 | struct mutex i_mutex; | 496 | struct mutex i_mutex; |
| @@ -763,6 +763,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); | |||
| 763 | extern void locks_remove_posix(struct file *, fl_owner_t); | 763 | extern void locks_remove_posix(struct file *, fl_owner_t); |
| 764 | extern void locks_remove_flock(struct file *); | 764 | extern void locks_remove_flock(struct file *); |
| 765 | extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); | 765 | extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); |
| 766 | extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *); | ||
| 766 | extern int posix_lock_file(struct file *, struct file_lock *); | 767 | extern int posix_lock_file(struct file *, struct file_lock *); |
| 767 | extern int posix_lock_file_wait(struct file *, struct file_lock *); | 768 | extern int posix_lock_file_wait(struct file *, struct file_lock *); |
| 768 | extern int posix_unblock_lock(struct file *, struct file_lock *); | 769 | extern int posix_unblock_lock(struct file *, struct file_lock *); |
| @@ -1401,6 +1402,13 @@ extern int blkdev_get(struct block_device *, mode_t, unsigned); | |||
| 1401 | extern int blkdev_put(struct block_device *); | 1402 | extern int blkdev_put(struct block_device *); |
| 1402 | extern int bd_claim(struct block_device *, void *); | 1403 | extern int bd_claim(struct block_device *, void *); |
| 1403 | extern void bd_release(struct block_device *); | 1404 | extern void bd_release(struct block_device *); |
| 1405 | #ifdef CONFIG_SYSFS | ||
| 1406 | extern int bd_claim_by_disk(struct block_device *, void *, struct gendisk *); | ||
| 1407 | extern void bd_release_from_disk(struct block_device *, struct gendisk *); | ||
| 1408 | #else | ||
| 1409 | #define bd_claim_by_disk(bdev, holder, disk) bd_claim(bdev, holder) | ||
| 1410 | #define bd_release_from_disk(bdev, disk) bd_release(bdev) | ||
| 1411 | #endif | ||
| 1404 | 1412 | ||
| 1405 | /* fs/char_dev.c */ | 1413 | /* fs/char_dev.c */ |
| 1406 | extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); | 1414 | extern int alloc_chrdev_region(dev_t *, unsigned, unsigned, const char *); |
| @@ -1644,7 +1652,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos, | |||
| 1644 | 1652 | ||
| 1645 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1653 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
| 1646 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | 1654 | struct block_device *bdev, const struct iovec *iov, loff_t offset, |
| 1647 | unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, | 1655 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, |
| 1648 | int lock_type); | 1656 | int lock_type); |
| 1649 | 1657 | ||
| 1650 | enum { | 1658 | enum { |
| @@ -1655,29 +1663,29 @@ enum { | |||
| 1655 | 1663 | ||
| 1656 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 1664 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
| 1657 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1665 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
| 1658 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1666 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
| 1659 | dio_iodone_t end_io) | 1667 | dio_iodone_t end_io) |
| 1660 | { | 1668 | { |
| 1661 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1669 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
| 1662 | nr_segs, get_blocks, end_io, DIO_LOCKING); | 1670 | nr_segs, get_block, end_io, DIO_LOCKING); |
| 1663 | } | 1671 | } |
| 1664 | 1672 | ||
| 1665 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | 1673 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, |
| 1666 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1674 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
| 1667 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1675 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
| 1668 | dio_iodone_t end_io) | 1676 | dio_iodone_t end_io) |
| 1669 | { | 1677 | { |
| 1670 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1678 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
| 1671 | nr_segs, get_blocks, end_io, DIO_NO_LOCKING); | 1679 | nr_segs, get_block, end_io, DIO_NO_LOCKING); |
| 1672 | } | 1680 | } |
| 1673 | 1681 | ||
| 1674 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, | 1682 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, |
| 1675 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1683 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
| 1676 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1684 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
| 1677 | dio_iodone_t end_io) | 1685 | dio_iodone_t end_io) |
| 1678 | { | 1686 | { |
| 1679 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1687 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
| 1680 | nr_segs, get_blocks, end_io, DIO_OWN_LOCKING); | 1688 | nr_segs, get_block, end_io, DIO_OWN_LOCKING); |
| 1681 | } | 1689 | } |
| 1682 | 1690 | ||
| 1683 | extern struct file_operations generic_ro_fops; | 1691 | extern struct file_operations generic_ro_fops; |
diff --git a/include/linux/futex.h b/include/linux/futex.h index 10f96c31971e..966a5b3da439 100644 --- a/include/linux/futex.h +++ b/include/linux/futex.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef _LINUX_FUTEX_H | 1 | #ifndef _LINUX_FUTEX_H |
| 2 | #define _LINUX_FUTEX_H | 2 | #define _LINUX_FUTEX_H |
| 3 | 3 | ||
| 4 | #include <linux/sched.h> | ||
| 5 | |||
| 4 | /* Second argument to futex syscall */ | 6 | /* Second argument to futex syscall */ |
| 5 | 7 | ||
| 6 | 8 | ||
| @@ -11,10 +13,97 @@ | |||
| 11 | #define FUTEX_CMP_REQUEUE 4 | 13 | #define FUTEX_CMP_REQUEUE 4 |
| 12 | #define FUTEX_WAKE_OP 5 | 14 | #define FUTEX_WAKE_OP 5 |
| 13 | 15 | ||
| 16 | /* | ||
| 17 | * Support for robust futexes: the kernel cleans up held futexes at | ||
| 18 | * thread exit time. | ||
| 19 | */ | ||
| 20 | |||
| 21 | /* | ||
| 22 | * Per-lock list entry - embedded in user-space locks, somewhere close | ||
| 23 | * to the futex field. (Note: user-space uses a double-linked list to | ||
| 24 | * achieve O(1) list add and remove, but the kernel only needs to know | ||
| 25 | * about the forward link) | ||
| 26 | * | ||
| 27 | * NOTE: this structure is part of the syscall ABI, and must not be | ||
| 28 | * changed. | ||
| 29 | */ | ||
| 30 | struct robust_list { | ||
| 31 | struct robust_list __user *next; | ||
| 32 | }; | ||
| 33 | |||
| 34 | /* | ||
| 35 | * Per-thread list head: | ||
| 36 | * | ||
| 37 | * NOTE: this structure is part of the syscall ABI, and must only be | ||
| 38 | * changed if the change is first communicated with the glibc folks. | ||
| 39 | * (When an incompatible change is done, we'll increase the structure | ||
| 40 | * size, which glibc will detect) | ||
| 41 | */ | ||
| 42 | struct robust_list_head { | ||
| 43 | /* | ||
| 44 | * The head of the list. Points back to itself if empty: | ||
| 45 | */ | ||
| 46 | struct robust_list list; | ||
| 47 | |||
| 48 | /* | ||
| 49 | * This relative offset is set by user-space, it gives the kernel | ||
| 50 | * the relative position of the futex field to examine. This way | ||
| 51 | * we keep userspace flexible, to freely shape its data-structure, | ||
| 52 | * without hardcoding any particular offset into the kernel: | ||
| 53 | */ | ||
| 54 | long futex_offset; | ||
| 55 | |||
| 56 | /* | ||
| 57 | * The death of the thread may race with userspace setting | ||
| 58 | * up a lock's links. So to handle this race, userspace first | ||
| 59 | * sets this field to the address of the to-be-taken lock, | ||
| 60 | * then does the lock acquire, and then adds itself to the | ||
| 61 | * list, and then clears this field. Hence the kernel will | ||
| 62 | * always have full knowledge of all locks that the thread | ||
| 63 | * _might_ have taken. We check the owner TID in any case, | ||
| 64 | * so only truly owned locks will be handled. | ||
| 65 | */ | ||
| 66 | struct robust_list __user *list_op_pending; | ||
| 67 | }; | ||
| 68 | |||
| 69 | /* | ||
| 70 | * Are there any waiters for this robust futex: | ||
| 71 | */ | ||
| 72 | #define FUTEX_WAITERS 0x80000000 | ||
| 73 | |||
| 74 | /* | ||
| 75 | * The kernel signals via this bit that a thread holding a futex | ||
| 76 | * has exited without unlocking the futex. The kernel also does | ||
| 77 | * a FUTEX_WAKE on such futexes, after setting the bit, to wake | ||
| 78 | * up any possible waiters: | ||
| 79 | */ | ||
| 80 | #define FUTEX_OWNER_DIED 0x40000000 | ||
| 81 | |||
| 82 | /* | ||
| 83 | * The rest of the robust-futex field is for the TID: | ||
| 84 | */ | ||
| 85 | #define FUTEX_TID_MASK 0x3fffffff | ||
| 86 | |||
| 87 | /* | ||
| 88 | * This limit protects against a deliberately circular list. | ||
| 89 | * (Not worth introducing an rlimit for it) | ||
| 90 | */ | ||
| 91 | #define ROBUST_LIST_LIMIT 2048 | ||
| 92 | |||
| 14 | long do_futex(unsigned long uaddr, int op, int val, | 93 | long do_futex(unsigned long uaddr, int op, int val, |
| 15 | unsigned long timeout, unsigned long uaddr2, int val2, | 94 | unsigned long timeout, unsigned long uaddr2, int val2, |
| 16 | int val3); | 95 | int val3); |
| 17 | 96 | ||
| 97 | extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr); | ||
| 98 | |||
| 99 | #ifdef CONFIG_FUTEX | ||
| 100 | extern void exit_robust_list(struct task_struct *curr); | ||
| 101 | #else | ||
| 102 | static inline void exit_robust_list(struct task_struct *curr) | ||
| 103 | { | ||
| 104 | } | ||
| 105 | #endif | ||
| 106 | |||
| 18 | #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ | 107 | #define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */ |
| 19 | #define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ | 108 | #define FUTEX_OP_ADD 1 /* *(int *)UADDR2 += OPARG; */ |
| 20 | #define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ | 109 | #define FUTEX_OP_OR 2 /* *(int *)UADDR2 |= OPARG; */ |
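The comments above pin down the whole user-visible encoding of a robust futex word: the owner's TID in the low 30 bits plus the two flag bits. A small stand-alone sketch that decodes a word the way a user-space lock implementation would; the constants are repeated locally only so the example builds without kernel headers:

    #include <stdio.h>

    /* mirror of the values defined in <linux/futex.h> above */
    #define FUTEX_WAITERS           0x80000000u
    #define FUTEX_OWNER_DIED        0x40000000u
    #define FUTEX_TID_MASK          0x3fffffffu

    static void decode_robust_futex(unsigned int word)
    {
            if (!(word & FUTEX_TID_MASK)) {
                    printf("unlocked\n");
                    return;
            }
            printf("owned by tid %u%s%s\n", word & FUTEX_TID_MASK,
                   (word & FUTEX_WAITERS) ? ", waiters queued" : "",
                   (word & FUTEX_OWNER_DIED) ? ", previous owner died" : "");
    }

    int main(void)
    {
            decode_robust_futex(0);
            decode_robust_futex(1234);
            decode_robust_futex(1234 | FUTEX_WAITERS);
            decode_robust_futex(1234 | FUTEX_OWNER_DIED);
            return 0;
    }

When a holder exits without unlocking, the kernel walks the registered robust_list, sets FUTEX_OWNER_DIED in each held word and issues a FUTEX_WAKE, which is the state the last call above represents.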
diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 2401dea2b867..9c8e6da2393b 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h | |||
| @@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name | |||
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | /* | 121 | /* |
| 122 | * Use the following fucntions to manipulate gameport's per-port | 122 | * Use the following functions to manipulate gameport's per-port |
| 123 | * driver-specific data. | 123 | * driver-specific data. |
| 124 | */ | 124 | */ |
| 125 | static inline void *gameport_get_drvdata(struct gameport *gameport) | 125 | static inline void *gameport_get_drvdata(struct gameport *gameport) |
| @@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data) | |||
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * Use the following fucntions to pin gameport's driver in process context | 136 | * Use the following functions to pin gameport's driver in process context |
| 137 | */ | 137 | */ |
| 138 | static inline int gameport_pin_driver(struct gameport *gameport) | 138 | static inline int gameport_pin_driver(struct gameport *gameport) |
| 139 | { | 139 | { |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index fd647fde5ec1..3c1b0294a742 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
| @@ -78,6 +78,7 @@ struct hd_struct { | |||
| 78 | sector_t start_sect; | 78 | sector_t start_sect; |
| 79 | sector_t nr_sects; | 79 | sector_t nr_sects; |
| 80 | struct kobject kobj; | 80 | struct kobject kobj; |
| 81 | struct kobject *holder_dir; | ||
| 81 | unsigned ios[2], sectors[2]; /* READs and WRITEs */ | 82 | unsigned ios[2], sectors[2]; /* READs and WRITEs */ |
| 82 | int policy, partno; | 83 | int policy, partno; |
| 83 | }; | 84 | }; |
| @@ -89,12 +90,12 @@ struct hd_struct { | |||
| 89 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 | 90 | #define GENHD_FL_SUPPRESS_PARTITION_INFO 32 |
| 90 | 91 | ||
| 91 | struct disk_stats { | 92 | struct disk_stats { |
| 92 | unsigned sectors[2]; /* READs and WRITEs */ | 93 | unsigned long sectors[2]; /* READs and WRITEs */ |
| 93 | unsigned ios[2]; | 94 | unsigned long ios[2]; |
| 94 | unsigned merges[2]; | 95 | unsigned long merges[2]; |
| 95 | unsigned ticks[2]; | 96 | unsigned long ticks[2]; |
| 96 | unsigned io_ticks; | 97 | unsigned long io_ticks; |
| 97 | unsigned time_in_queue; | 98 | unsigned long time_in_queue; |
| 98 | }; | 99 | }; |
| 99 | 100 | ||
| 100 | struct gendisk { | 101 | struct gendisk { |
| @@ -114,6 +115,8 @@ struct gendisk { | |||
| 114 | int number; /* more of the same */ | 115 | int number; /* more of the same */ |
| 115 | struct device *driverfs_dev; | 116 | struct device *driverfs_dev; |
| 116 | struct kobject kobj; | 117 | struct kobject kobj; |
| 118 | struct kobject *holder_dir; | ||
| 119 | struct kobject *slave_dir; | ||
| 117 | 120 | ||
| 118 | struct timer_rand_state *random; | 121 | struct timer_rand_state *random; |
| 119 | int policy; | 122 | int policy; |
diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h new file mode 100644 index 000000000000..70ad09c8ad1e --- /dev/null +++ b/include/linux/gigaset_dev.h | |||
| @@ -0,0 +1,32 @@ | |||
| 1 | /* | ||
| 2 | * interface to user space for the gigaset driver | ||
| 3 | * | ||
| 4 | * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> | ||
| 5 | * | ||
| 6 | * ===================================================================== | ||
| 7 | * This program is free software; you can redistribute it and/or | ||
| 8 | * modify it under the terms of the GNU General Public License as | ||
| 9 | * published by the Free Software Foundation; either version 2 of | ||
| 10 | * the License, or (at your option) any later version. | ||
| 11 | * ===================================================================== | ||
| 12 | * Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $ | ||
| 13 | * ===================================================================== | ||
| 14 | */ | ||
| 15 | |||
| 16 | #ifndef GIGASET_INTERFACE_H | ||
| 17 | #define GIGASET_INTERFACE_H | ||
| 18 | |||
| 19 | #include <linux/ioctl.h> | ||
| 20 | |||
| 21 | #define GIGASET_IOCTL 0x47 | ||
| 22 | |||
| 23 | #define GIGVER_DRIVER 0 | ||
| 24 | #define GIGVER_COMPAT 1 | ||
| 25 | #define GIGVER_FWBASE 2 | ||
| 26 | |||
| 27 | #define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int) | ||
| 28 | #define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int) | ||
| 29 | #define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay? | ||
| 30 | #define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4]) | ||
| 31 | |||
| 32 | #endif | ||
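A hedged userspace sketch of the new ioctl interface: querying version information with GIGASET_VERSION. The device path is a placeholder, and passing the GIGVER_* selector in element 0 of the array is an assumption of this sketch rather than something the header spells out:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/gigaset_dev.h>

    int main(void)
    {
            unsigned version[4] = { GIGVER_DRIVER };        /* assumed selector */
            int fd = open("/dev/ttyGU0", O_RDONLY | O_NONBLOCK); /* placeholder */

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, GIGASET_VERSION, version) == 0)
                    printf("driver version %u.%u.%u.%u\n",
                           version[0], version[1], version[2], version[3]);
            close(fd);
            return 0;
    }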
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6bece9280eb7..892c4ea1b425 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
| @@ -7,6 +7,18 @@ | |||
| 7 | 7 | ||
| 8 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
| 9 | 9 | ||
| 10 | #ifndef ARCH_HAS_FLUSH_ANON_PAGE | ||
| 11 | static inline void flush_anon_page(struct page *page, unsigned long vmaddr) | ||
| 12 | { | ||
| 13 | } | ||
| 14 | #endif | ||
| 15 | |||
| 16 | #ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | ||
| 17 | static inline void flush_kernel_dcache_page(struct page *page) | ||
| 18 | { | ||
| 19 | } | ||
| 20 | #endif | ||
| 21 | |||
| 10 | #ifdef CONFIG_HIGHMEM | 22 | #ifdef CONFIG_HIGHMEM |
| 11 | 23 | ||
| 12 | #include <asm/highmem.h> | 24 | #include <asm/highmem.h> |
diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 27238194b212..707f7cb9e795 100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
| 5 | 5 | ||
| 6 | #ifdef __KERNEL__ | ||
| 7 | |||
| 6 | /* | 8 | /* |
| 7 | * Offsets into HPET Registers | 9 | * Offsets into HPET Registers |
| 8 | */ | 10 | */ |
| @@ -85,22 +87,6 @@ struct hpet { | |||
| 85 | #define Tn_FSB_INT_ADDR_SHIFT (32UL) | 87 | #define Tn_FSB_INT_ADDR_SHIFT (32UL) |
| 86 | #define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) | 88 | #define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) |
| 87 | 89 | ||
| 88 | struct hpet_info { | ||
| 89 | unsigned long hi_ireqfreq; /* Hz */ | ||
| 90 | unsigned long hi_flags; /* information */ | ||
| 91 | unsigned short hi_hpet; | ||
| 92 | unsigned short hi_timer; | ||
| 93 | }; | ||
| 94 | |||
| 95 | #define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ | ||
| 96 | |||
| 97 | #define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ | ||
| 98 | #define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ | ||
| 99 | #define HPET_INFO _IOR('h', 0x03, struct hpet_info) | ||
| 100 | #define HPET_EPI _IO('h', 0x04) /* enable periodic */ | ||
| 101 | #define HPET_DPI _IO('h', 0x05) /* disable periodic */ | ||
| 102 | #define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ | ||
| 103 | |||
| 104 | /* | 90 | /* |
| 105 | * exported interfaces | 91 | * exported interfaces |
| 106 | */ | 92 | */ |
| @@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int); | |||
| 133 | int hpet_unregister(struct hpet_task *); | 119 | int hpet_unregister(struct hpet_task *); |
| 134 | int hpet_control(struct hpet_task *, unsigned int, unsigned long); | 120 | int hpet_control(struct hpet_task *, unsigned int, unsigned long); |
| 135 | 121 | ||
| 122 | #endif /* __KERNEL__ */ | ||
| 123 | |||
| 124 | struct hpet_info { | ||
| 125 | unsigned long hi_ireqfreq; /* Hz */ | ||
| 126 | unsigned long hi_flags; /* information */ | ||
| 127 | unsigned short hi_hpet; | ||
| 128 | unsigned short hi_timer; | ||
| 129 | }; | ||
| 130 | |||
| 131 | #define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ | ||
| 132 | |||
| 133 | #define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ | ||
| 134 | #define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ | ||
| 135 | #define HPET_INFO _IOR('h', 0x03, struct hpet_info) | ||
| 136 | #define HPET_EPI _IO('h', 0x04) /* enable periodic */ | ||
| 137 | #define HPET_DPI _IO('h', 0x05) /* disable periodic */ | ||
| 138 | #define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ | ||
| 139 | |||
| 136 | #endif /* !__HPET__ */ | 140 | #endif /* !__HPET__ */ |
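Moving struct hpet_info and the HPET_* ioctls outside #ifdef __KERNEL__ makes them visible to userspace through this header. A hedged userspace sketch of the resulting interface: /dev/hpet is the usual device node, error handling is abbreviated, and exactly which headers a program needs depends on how the kernel headers are exported.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hpet.h>

int main(void)
{
	struct hpet_info info;
	unsigned long counts;
	int fd = open("/dev/hpet", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, HPET_INFO, &info) == 0)
		printf("timer %hu flags 0x%lx\n", info.hi_timer, info.hi_flags);
	if (ioctl(fd, HPET_IRQFREQ, 64UL) == 0 &&	/* 64 Hz periodic */
	    ioctl(fd, HPET_IE_ON, 0) == 0)
		read(fd, &counts, sizeof(counts));	/* blocks until a tick */
	close(fd);
	return 0;
}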
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6401c31d6add..93830158348e 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -34,15 +34,7 @@ enum hrtimer_restart { | |||
| 34 | HRTIMER_RESTART, | 34 | HRTIMER_RESTART, |
| 35 | }; | 35 | }; |
| 36 | 36 | ||
| 37 | /* | 37 | #define HRTIMER_INACTIVE ((void *)1UL) |
| 38 | * Timer states: | ||
| 39 | */ | ||
| 40 | enum hrtimer_state { | ||
| 41 | HRTIMER_INACTIVE, /* Timer is inactive */ | ||
| 42 | HRTIMER_EXPIRED, /* Timer is expired */ | ||
| 43 | HRTIMER_RUNNING, /* Timer is running the callback function */ | ||
| 44 | HRTIMER_PENDING, /* Timer is pending */ | ||
| 45 | }; | ||
| 46 | 38 | ||
| 47 | struct hrtimer_base; | 39 | struct hrtimer_base; |
| 48 | 40 | ||
| @@ -53,9 +45,7 @@ struct hrtimer_base; | |||
| 53 | * @expires: the absolute expiry time in the hrtimers internal | 45 | * @expires: the absolute expiry time in the hrtimers internal |
| 54 | * representation. The time is related to the clock on | 46 | * representation. The time is related to the clock on |
| 55 | * which the timer is based. | 47 | * which the timer is based. |
| 56 | * @state: state of the timer | ||
| 57 | * @function: timer expiry callback function | 48 | * @function: timer expiry callback function |
| 58 | * @data: argument for the callback function | ||
| 59 | * @base: pointer to the timer base (per cpu and per clock) | 49 | * @base: pointer to the timer base (per cpu and per clock) |
| 60 | * | 50 | * |
| 61 | * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() | 51 | * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() |
| @@ -63,23 +53,23 @@ struct hrtimer_base; | |||
| 63 | struct hrtimer { | 53 | struct hrtimer { |
| 64 | struct rb_node node; | 54 | struct rb_node node; |
| 65 | ktime_t expires; | 55 | ktime_t expires; |
| 66 | enum hrtimer_state state; | 56 | int (*function)(struct hrtimer *); |
| 67 | int (*function)(void *); | ||
| 68 | void *data; | ||
| 69 | struct hrtimer_base *base; | 57 | struct hrtimer_base *base; |
| 70 | }; | 58 | }; |
| 71 | 59 | ||
| 72 | /** | 60 | /** |
| 73 | * struct hrtimer_base - the timer base for a specific clock | 61 | * struct hrtimer_base - the timer base for a specific clock |
| 74 | * | 62 | * |
| 75 | * @index: clock type index for per_cpu support when moving a timer | 63 | * @index: clock type index for per_cpu support when moving a timer |
| 76 | * to a base on another cpu. | 64 | * to a base on another cpu. |
| 77 | * @lock: lock protecting the base and associated timers | 65 | * @lock: lock protecting the base and associated timers |
| 78 | * @active: red black tree root node for the active timers | 66 | * @active: red black tree root node for the active timers |
| 79 | * @first: pointer to the timer node which expires first | 67 | * @first: pointer to the timer node which expires first |
| 80 | * @resolution: the resolution of the clock, in nanoseconds | 68 | * @resolution: the resolution of the clock, in nanoseconds |
| 81 | * @get_time: function to retrieve the current time of the clock | 69 | * @get_time: function to retrieve the current time of the clock |
| 82 | * @curr_timer: the timer which is executing a callback right now | 70 | * @get_softirq_time: function to retrieve the current time from the softirq |

| 71 | * @curr_timer: the timer which is executing a callback right now | ||
| 72 | * @softirq_time: the time when running the hrtimer queue in the softirq | ||
| 83 | */ | 73 | */ |
| 84 | struct hrtimer_base { | 74 | struct hrtimer_base { |
| 85 | clockid_t index; | 75 | clockid_t index; |
| @@ -88,7 +78,9 @@ struct hrtimer_base { | |||
| 88 | struct rb_node *first; | 78 | struct rb_node *first; |
| 89 | ktime_t resolution; | 79 | ktime_t resolution; |
| 90 | ktime_t (*get_time)(void); | 80 | ktime_t (*get_time)(void); |
| 81 | ktime_t (*get_softirq_time)(void); | ||
| 91 | struct hrtimer *curr_timer; | 82 | struct hrtimer *curr_timer; |
| 83 | ktime_t softirq_time; | ||
| 92 | }; | 84 | }; |
| 93 | 85 | ||
| 94 | /* | 86 | /* |
| @@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void); | |||
| 122 | 114 | ||
| 123 | static inline int hrtimer_active(const struct hrtimer *timer) | 115 | static inline int hrtimer_active(const struct hrtimer *timer) |
| 124 | { | 116 | { |
| 125 | return timer->state == HRTIMER_PENDING; | 117 | return timer->node.rb_parent != HRTIMER_INACTIVE; |
| 126 | } | 118 | } |
| 127 | 119 | ||
| 128 | /* Forward a hrtimer so it expires after now: */ | 120 | /* Forward a hrtimer so it expires after now: */ |
| 129 | extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval); | 121 | extern unsigned long |
| 122 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); | ||
| 130 | 123 | ||
| 131 | /* Precise sleep: */ | 124 | /* Precise sleep: */ |
| 132 | extern long hrtimer_nanosleep(struct timespec *rqtp, | 125 | extern long hrtimer_nanosleep(struct timespec *rqtp, |
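With the state and data members gone, a timer user now embeds the struct hrtimer in its own structure and recovers it with container_of() in the callback, and hrtimer_forward() takes the current time explicitly, typically the base's softirq_time when rearming from the timer softirq. A minimal sketch of that convention; the surrounding driver structure is illustrative only.

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct my_dev {
	struct hrtimer	tick;		/* embedded timer, no ->data needed */
	ktime_t		period;
};

static int my_dev_tick(struct hrtimer *timer)
{
	struct my_dev *dev = container_of(timer, struct my_dev, tick);

	/* ... per-tick work ... */
	hrtimer_forward(timer, timer->base->softirq_time, dev->period);
	return HRTIMER_RESTART;		/* requeue at the forwarded expiry */
}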
diff --git a/include/linux/i2c-id.h b/include/linux/i2c-id.h index 679b46a6a565..c8b81f419fd8 100644 --- a/include/linux/i2c-id.h +++ b/include/linux/i2c-id.h | |||
| @@ -108,6 +108,10 @@ | |||
| 108 | #define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ | 108 | #define I2C_DRIVERID_UPD64083 78 /* upd64083 video processor */ |
| 109 | #define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ | 109 | #define I2C_DRIVERID_UPD64031A 79 /* upd64031a video processor */ |
| 110 | #define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ | 110 | #define I2C_DRIVERID_SAA717X 80 /* saa717x video encoder */ |
| 111 | #define I2C_DRIVERID_DS1672 81 /* Dallas/Maxim DS1672 RTC */ | ||
| 112 | #define I2C_DRIVERID_X1205 82 /* Xicor/Intersil X1205 RTC */ | ||
| 113 | #define I2C_DRIVERID_PCF8563 83 /* Philips PCF8563 RTC */ | ||
| 114 | #define I2C_DRIVERID_RS5C372 84 /* Ricoh RS5C372 RTC */ | ||
| 111 | 115 | ||
| 112 | #define I2C_DRIVERID_I2CDEV 900 | 116 | #define I2C_DRIVERID_I2CDEV 900 |
| 113 | #define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ | 117 | #define I2C_DRIVERID_ARP 902 /* SMBus ARP Client */ |
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 5a9d8c599171..dd7d627bf66f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
| @@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, | |||
| 950 | if (!pool->slab) | 950 | if (!pool->slab) |
| 951 | goto free_name; | 951 | goto free_name; |
| 952 | 952 | ||
| 953 | pool->mempool = | 953 | pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); |
| 954 | mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, | ||
| 955 | pool->slab); | ||
| 956 | if (!pool->mempool) | 954 | if (!pool->mempool) |
| 957 | goto free_slab; | 955 | goto free_slab; |
| 958 | 956 | ||
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index d6276e60b3bf..0a84b56935c2 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | 36 | ||
| 37 | #include <linux/ipmi_msgdefs.h> | 37 | #include <linux/ipmi_msgdefs.h> |
| 38 | #include <linux/compiler.h> | 38 | #include <linux/compiler.h> |
| 39 | #include <linux/device.h> | ||
| 39 | 40 | ||
| 40 | /* | 41 | /* |
| 41 | * This file describes an interface to an IPMI driver. You have to | 42 | * This file describes an interface to an IPMI driver. You have to |
| @@ -397,7 +398,7 @@ struct ipmi_smi_watcher | |||
| 397 | the watcher list. So you can add and remove users from the | 398 | the watcher list. So you can add and remove users from the |
| 398 | IPMI interface, send messages, etc., but you cannot add | 399 | IPMI interface, send messages, etc., but you cannot add |
| 399 | or remove SMI watchers or SMI interfaces. */ | 400 | or remove SMI watchers or SMI interfaces. */ |
| 400 | void (*new_smi)(int if_num); | 401 | void (*new_smi)(int if_num, struct device *dev); |
| 401 | void (*smi_gone)(int if_num); | 402 | void (*smi_gone)(int if_num); |
| 402 | }; | 403 | }; |
| 403 | 404 | ||
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h index 03bc64dc2ec1..22f5e2afda4f 100644 --- a/include/linux/ipmi_msgdefs.h +++ b/include/linux/ipmi_msgdefs.h | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #define IPMI_NETFN_APP_RESPONSE 0x07 | 47 | #define IPMI_NETFN_APP_RESPONSE 0x07 |
| 48 | #define IPMI_GET_DEVICE_ID_CMD 0x01 | 48 | #define IPMI_GET_DEVICE_ID_CMD 0x01 |
| 49 | #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 | 49 | #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 |
| 50 | #define IPMI_GET_DEVICE_GUID_CMD 0x08 | ||
| 50 | #define IPMI_GET_MSG_FLAGS_CMD 0x31 | 51 | #define IPMI_GET_MSG_FLAGS_CMD 0x31 |
| 51 | #define IPMI_SEND_MSG_CMD 0x34 | 52 | #define IPMI_SEND_MSG_CMD 0x34 |
| 52 | #define IPMI_GET_MSG_CMD 0x33 | 53 | #define IPMI_GET_MSG_CMD 0x33 |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index e36ee157ad67..53571288a9fc 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
| @@ -37,6 +37,9 @@ | |||
| 37 | #include <linux/ipmi_msgdefs.h> | 37 | #include <linux/ipmi_msgdefs.h> |
| 38 | #include <linux/proc_fs.h> | 38 | #include <linux/proc_fs.h> |
| 39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
| 40 | #include <linux/device.h> | ||
| 41 | #include <linux/platform_device.h> | ||
| 42 | #include <linux/ipmi_smi.h> | ||
| 40 | 43 | ||
| 41 | /* This file describes the interface for IPMI system management interface | 44 | /* This file describes the interface for IPMI system management interface |
| 42 | drivers to bind into the IPMI message handler. */ | 45 | drivers to bind into the IPMI message handler. */ |
| @@ -113,12 +116,52 @@ struct ipmi_smi_handlers | |||
| 113 | void (*dec_usecount)(void *send_info); | 116 | void (*dec_usecount)(void *send_info); |
| 114 | }; | 117 | }; |
| 115 | 118 | ||
| 119 | struct ipmi_device_id { | ||
| 120 | unsigned char device_id; | ||
| 121 | unsigned char device_revision; | ||
| 122 | unsigned char firmware_revision_1; | ||
| 123 | unsigned char firmware_revision_2; | ||
| 124 | unsigned char ipmi_version; | ||
| 125 | unsigned char additional_device_support; | ||
| 126 | unsigned int manufacturer_id; | ||
| 127 | unsigned int product_id; | ||
| 128 | unsigned char aux_firmware_revision[4]; | ||
| 129 | unsigned int aux_firmware_revision_set : 1; | ||
| 130 | }; | ||
| 131 | |||
| 132 | #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) | ||
| 133 | #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) | ||
| 134 | |||
| 135 | /* Take a pointer to a raw data buffer and a length and extract device | ||
| 136 | id information from it. The first byte of data must point to the | ||
| 137 | byte from the get device id response after the completion code. | ||
| 138 | The caller is responsible for making sure the length is at least | ||
| 139 | 11 and the command completed without error. */ | ||
| 140 | static inline void ipmi_demangle_device_id(unsigned char *data, | ||
| 141 | unsigned int data_len, | ||
| 142 | struct ipmi_device_id *id) | ||
| 143 | { | ||
| 144 | id->device_id = data[0]; | ||
| 145 | id->device_revision = data[1]; | ||
| 146 | id->firmware_revision_1 = data[2]; | ||
| 147 | id->firmware_revision_2 = data[3]; | ||
| 148 | id->ipmi_version = data[4]; | ||
| 149 | id->additional_device_support = data[5]; | ||
| 150 | id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16); | ||
| 151 | id->product_id = data[9] | (data[10] << 8); | ||
| 152 | if (data_len >= 15) { | ||
| 153 | memcpy(id->aux_firmware_revision, data+11, 4); | ||
| 154 | id->aux_firmware_revision_set = 1; | ||
| 155 | } else | ||
| 156 | id->aux_firmware_revision_set = 0; | ||
| 157 | } | ||
| 158 | |||
| 116 | /* Add a low-level interface to the IPMI driver. Note that if the | 159 | /* Add a low-level interface to the IPMI driver. Note that if the |
| 117 | interface doesn't know its slave address, it should pass in zero. */ | 160 | interface doesn't know its slave address, it should pass in zero. */ |
| 118 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 161 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, |
| 119 | void *send_info, | 162 | void *send_info, |
| 120 | unsigned char version_major, | 163 | struct ipmi_device_id *device_id, |
| 121 | unsigned char version_minor, | 164 | struct device *dev, |
| 122 | unsigned char slave_addr, | 165 | unsigned char slave_addr, |
| 123 | ipmi_smi_t *intf); | 166 | ipmi_smi_t *intf); |
| 124 | 167 | ||
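ipmi_register_smi() no longer takes raw version numbers; the low-level driver now parses the Get Device ID response with the new ipmi_demangle_device_id() helper and hands over the parsed id together with its struct device. A hedged sketch of a caller; my_handlers and my_send_info are illustrative driver objects, only the calls shown come from this header.

#include <linux/ipmi_smi.h>

static struct ipmi_smi_handlers my_handlers;	/* filled in elsewhere */
static void *my_send_info;			/* driver-private cookie */

static int my_smi_attach(struct device *dev,
			 unsigned char *resp, unsigned int resp_len)
{
	struct ipmi_device_id id;
	ipmi_smi_t intf;

	/* resp must point at the Get Device ID data after the completion
	 * code and be at least 11 bytes, as required above. */
	ipmi_demangle_device_id(resp, resp_len, &id);

	return ipmi_register_smi(&my_handlers, my_send_info, &id, dev,
				 0 /* slave address unknown */, &intf);
}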
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 4fc7dffd66ef..6a425e370cb3 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
| @@ -895,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *); | |||
| 895 | extern void journal_release_buffer (handle_t *, struct buffer_head *); | 895 | extern void journal_release_buffer (handle_t *, struct buffer_head *); |
| 896 | extern int journal_forget (handle_t *, struct buffer_head *); | 896 | extern int journal_forget (handle_t *, struct buffer_head *); |
| 897 | extern void journal_sync_buffer (struct buffer_head *); | 897 | extern void journal_sync_buffer (struct buffer_head *); |
| 898 | extern int journal_invalidatepage(journal_t *, | 898 | extern void journal_invalidatepage(journal_t *, |
| 899 | struct page *, unsigned long); | 899 | struct page *, unsigned long); |
| 900 | extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); | 900 | extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); |
| 901 | extern int journal_stop(handle_t *); | 901 | extern int journal_stop(handle_t *); |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 03d6cfaa5b8a..a3720f973ea5 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -87,7 +87,7 @@ extern int cond_resched(void); | |||
| 87 | (__x < 0) ? -__x : __x; \ | 87 | (__x < 0) ? -__x : __x; \ |
| 88 | }) | 88 | }) |
| 89 | 89 | ||
| 90 | extern struct notifier_block *panic_notifier_list; | 90 | extern struct atomic_notifier_head panic_notifier_list; |
| 91 | extern long (*panic_blink)(long time); | 91 | extern long (*panic_blink)(long time); |
| 92 | NORET_TYPE void panic(const char * fmt, ...) | 92 | NORET_TYPE void panic(const char * fmt, ...) |
| 93 | __attribute__ ((NORET_AND format (printf, 1, 2))); | 93 | __attribute__ ((NORET_AND format (printf, 1, 2))); |
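Because panic_notifier_list is now an atomic_notifier_head rather than a bare notifier_block pointer, callers register against it with the atomic chain API. A hedged sketch; the callback is illustrative, and the void pointer passed down this particular chain is the formatted panic message.

#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/init.h>

static int my_panic_event(struct notifier_block *nb, unsigned long event,
			  void *msg)
{
	/* msg is the formatted panic string; do not block here */
	return NOTIFY_DONE;
}

static struct notifier_block my_panic_block = {
	.notifier_call = my_panic_event,
};

static int __init my_panic_hook_init(void)
{
	return atomic_notifier_chain_register(&panic_notifier_list,
					      &my_panic_block);
}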
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index f3dec45ef874..62bc57580707 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
| @@ -64,9 +64,6 @@ typedef union { | |||
| 64 | 64 | ||
| 65 | #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) | 65 | #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) |
| 66 | 66 | ||
| 67 | /* Define a ktime_t variable and initialize it to zero: */ | ||
| 68 | #define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } | ||
| 69 | |||
| 70 | /** | 67 | /** |
| 71 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value | 68 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value |
| 72 | * | 69 | * |
| @@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
| 113 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ | 110 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ |
| 114 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) | 111 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) |
| 115 | 112 | ||
| 116 | /* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */ | ||
| 117 | #define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64) | ||
| 118 | |||
| 119 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ | 113 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ |
| 120 | #define ktime_to_ns(kt) ((kt).tv64) | 114 | #define ktime_to_ns(kt) ((kt).tv64) |
| 121 | 115 | ||
| @@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
| 136 | * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC | 130 | * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC |
| 137 | */ | 131 | */ |
| 138 | 132 | ||
| 139 | /* Define a ktime_t variable and initialize it to zero: */ | ||
| 140 | #define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } | ||
| 141 | |||
| 142 | /* Set a ktime_t variable to a value in sec/nsec representation: */ | 133 | /* Set a ktime_t variable to a value in sec/nsec representation: */ |
| 143 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) | 134 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) |
| 144 | { | 135 | { |
| @@ -255,17 +246,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt) | |||
| 255 | } | 246 | } |
| 256 | 247 | ||
| 257 | /** | 248 | /** |
| 258 | * ktime_to_clock_t - convert a ktime_t variable to clock_t format | ||
| 259 | * @kt: the ktime_t variable to convert | ||
| 260 | * | ||
| 261 | * Returns a clock_t variable with the converted value | ||
| 262 | */ | ||
| 263 | static inline clock_t ktime_to_clock_t(const ktime_t kt) | ||
| 264 | { | ||
| 265 | return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec); | ||
| 266 | } | ||
| 267 | |||
| 268 | /** | ||
| 269 | * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds | 249 | * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds |
| 270 | * @kt: the ktime_t variable to convert | 250 | * @kt: the ktime_t variable to convert |
| 271 | * | 251 | * |
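With DEFINE_KTIME and ktime_to_clock_t removed from both storage variants, callers initialize timestamps explicitly with ktime_set() and do any clock_t conversion themselves from scalar nanoseconds. A small hedged sketch of the replacements:

#include <linux/ktime.h>

static void ktime_example(void)
{
	ktime_t zero  = ktime_set(0, 0);	/* was: DEFINE_KTIME(zero) */
	ktime_t stamp = ktime_set(1, 500000);	/* 1 s + 500 us */
	s64 ns = ktime_to_ns(ktime_sub(stamp, zero));

	/* clock_t conversion, if needed, is now open-coded by the caller
	 * from 'ns', e.g. with the nsec_to_clock_t() helper the removed
	 * macro used internally. */
	(void)ns;
}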
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h new file mode 100644 index 000000000000..9065199319d0 --- /dev/null +++ b/include/linux/m48t86.h | |||
| @@ -0,0 +1,16 @@ | |||
| 1 | /* | ||
| 2 | * ST M48T86 / Dallas DS12887 RTC driver | ||
| 3 | * Copyright (c) 2006 Tower Technologies | ||
| 4 | * | ||
| 5 | * Author: Alessandro Zummo <a.zummo@towertech.it> | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | ||
| 11 | |||
| 12 | struct m48t86_ops | ||
| 13 | { | ||
| 14 | void (*writeb)(unsigned char value, unsigned long addr); | ||
| 15 | unsigned char (*readb)(unsigned long addr); | ||
| 16 | }; | ||
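The new header only declares the accessor pair the rtc-m48t86 driver expects; board code supplies the actual bus access. A hedged board-side sketch follows; the register addresses and the indexed access scheme are purely illustrative, and real boards use their own mapping macros.

#include <linux/m48t86.h>
#include <asm/io.h>

#define MY_RTC_INDEX	((void __iomem *)0x10800000)	/* hypothetical mapping */
#define MY_RTC_DATA	((void __iomem *)0x10800004)

static void my_rtc_writebyte(unsigned char value, unsigned long addr)
{
	__raw_writeb(addr, MY_RTC_INDEX);	/* select RTC register */
	__raw_writeb(value, MY_RTC_DATA);
}

static unsigned char my_rtc_readbyte(unsigned long addr)
{
	__raw_writeb(addr, MY_RTC_INDEX);
	return __raw_readb(MY_RTC_DATA);
}

static struct m48t86_ops my_rtc_ops = {
	.writeb	= my_rtc_writebyte,
	.readb	= my_rtc_readbyte,
};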
diff --git a/include/linux/memory.h b/include/linux/memory.h index e251dc43d0f5..8f04143ca363 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -77,7 +77,6 @@ extern int remove_memory_block(unsigned long, struct mem_section *, int); | |||
| 77 | 77 | ||
| 78 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 78 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
| 79 | 79 | ||
| 80 | struct notifier_block; | ||
| 81 | 80 | ||
| 82 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 81 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
| 83 | 82 | ||
diff --git a/include/linux/mempool.h b/include/linux/mempool.h index f2427d7394b0..9be484d11283 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | 6 | ||
| 7 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
| 8 | 8 | ||
| 9 | struct kmem_cache; | ||
| 10 | |||
| 9 | typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); | 11 | typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); |
| 10 | typedef void (mempool_free_t)(void *element, void *pool_data); | 12 | typedef void (mempool_free_t)(void *element, void *pool_data); |
| 11 | 13 | ||
| @@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool); | |||
| 37 | */ | 39 | */ |
| 38 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); | 40 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); |
| 39 | void mempool_free_slab(void *element, void *pool_data); | 41 | void mempool_free_slab(void *element, void *pool_data); |
| 42 | static inline mempool_t * | ||
| 43 | mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) | ||
| 44 | { | ||
| 45 | return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, | ||
| 46 | (void *) kc); | ||
| 47 | } | ||
| 48 | |||
| 49 | /* | ||
| 50 | * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree | ||
| 51 | * the amount of memory specified by pool_data | ||
| 52 | */ | ||
| 53 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); | ||
| 54 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data); | ||
| 55 | void mempool_kfree(void *element, void *pool_data); | ||
| 56 | static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) | ||
| 57 | { | ||
| 58 | return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, | ||
| 59 | (void *) size); | ||
| 60 | } | ||
| 61 | static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size) | ||
| 62 | { | ||
| 63 | return mempool_create(min_nr, mempool_kzalloc, mempool_kfree, | ||
| 64 | (void *) size); | ||
| 65 | } | ||
| 66 | |||
| 67 | /* | ||
| 68 | * A mempool_alloc_t and mempool_free_t for a simple page allocator that | ||
| 69 | * allocates pages of the order specified by pool_data | ||
| 70 | */ | ||
| 71 | void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); | ||
| 72 | void mempool_free_pages(void *element, void *pool_data); | ||
| 73 | static inline mempool_t *mempool_create_page_pool(int min_nr, int order) | ||
| 74 | { | ||
| 75 | return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, | ||
| 76 | (void *)(long)order); | ||
| 77 | } | ||
| 40 | 78 | ||
| 41 | #endif /* _LINUX_MEMPOOL_H */ | 79 | #endif /* _LINUX_MEMPOOL_H */ |
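The new wrappers fold the common mempool_create() call patterns into one-liners, as the i2o hunk above already shows for slab-backed pools. A hedged sketch of the kmalloc and page variants, with arbitrary example sizes:

#include <linux/mempool.h>
#include <linux/errno.h>

static mempool_t *cmd_pool;	/* fixed-size command buffers */
static mempool_t *pg_pool;	/* whole pages */

static int my_pools_init(void)
{
	cmd_pool = mempool_create_kmalloc_pool(4, 128);	/* >= 4 buffers of 128 bytes */
	if (!cmd_pool)
		return -ENOMEM;

	pg_pool = mempool_create_page_pool(2, 0);	/* >= 2 order-0 pages */
	if (!pg_pool) {
		mempool_destroy(cmd_pool);
		return -ENOMEM;
	}
	return 0;
}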
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ebfc238cc243..b5c21122c299 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/numa.h> | 13 | #include <linux/numa.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/seqlock.h> | 15 | #include <linux/seqlock.h> |
| 16 | #include <linux/nodemask.h> | ||
| 16 | #include <asm/atomic.h> | 17 | #include <asm/atomic.h> |
| 17 | 18 | ||
| 18 | /* Free memory management - zoned buddy allocator. */ | 19 | /* Free memory management - zoned buddy allocator. */ |
| @@ -225,7 +226,6 @@ struct zone { | |||
| 225 | * Discontig memory support fields. | 226 | * Discontig memory support fields. |
| 226 | */ | 227 | */ |
| 227 | struct pglist_data *zone_pgdat; | 228 | struct pglist_data *zone_pgdat; |
| 228 | struct page *zone_mem_map; | ||
| 229 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ | 229 | /* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */ |
| 230 | unsigned long zone_start_pfn; | 230 | unsigned long zone_start_pfn; |
| 231 | 231 | ||
| @@ -307,7 +307,6 @@ typedef struct pglist_data { | |||
| 307 | unsigned long node_spanned_pages; /* total size of physical page | 307 | unsigned long node_spanned_pages; /* total size of physical page |
| 308 | range, including holes */ | 308 | range, including holes */ |
| 309 | int node_id; | 309 | int node_id; |
| 310 | struct pglist_data *pgdat_next; | ||
| 311 | wait_queue_head_t kswapd_wait; | 310 | wait_queue_head_t kswapd_wait; |
| 312 | struct task_struct *kswapd; | 311 | struct task_struct *kswapd; |
| 313 | int kswapd_max_order; | 312 | int kswapd_max_order; |
| @@ -324,8 +323,6 @@ typedef struct pglist_data { | |||
| 324 | 323 | ||
| 325 | #include <linux/memory_hotplug.h> | 324 | #include <linux/memory_hotplug.h> |
| 326 | 325 | ||
| 327 | extern struct pglist_data *pgdat_list; | ||
| 328 | |||
| 329 | void __get_zone_counts(unsigned long *active, unsigned long *inactive, | 326 | void __get_zone_counts(unsigned long *active, unsigned long *inactive, |
| 330 | unsigned long *free, struct pglist_data *pgdat); | 327 | unsigned long *free, struct pglist_data *pgdat); |
| 331 | void get_zone_counts(unsigned long *active, unsigned long *inactive, | 328 | void get_zone_counts(unsigned long *active, unsigned long *inactive, |
| @@ -350,57 +347,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long); | |||
| 350 | */ | 347 | */ |
| 351 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) | 348 | #define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones) |
| 352 | 349 | ||
| 353 | /** | ||
| 354 | * for_each_pgdat - helper macro to iterate over all nodes | ||
| 355 | * @pgdat - pointer to a pg_data_t variable | ||
| 356 | * | ||
| 357 | * Meant to help with common loops of the form | ||
| 358 | * pgdat = pgdat_list; | ||
| 359 | * while(pgdat) { | ||
| 360 | * ... | ||
| 361 | * pgdat = pgdat->pgdat_next; | ||
| 362 | * } | ||
| 363 | */ | ||
| 364 | #define for_each_pgdat(pgdat) \ | ||
| 365 | for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next) | ||
| 366 | |||
| 367 | /* | ||
| 368 | * next_zone - helper magic for for_each_zone() | ||
| 369 | * Thanks to William Lee Irwin III for this piece of ingenuity. | ||
| 370 | */ | ||
| 371 | static inline struct zone *next_zone(struct zone *zone) | ||
| 372 | { | ||
| 373 | pg_data_t *pgdat = zone->zone_pgdat; | ||
| 374 | |||
| 375 | if (zone < pgdat->node_zones + MAX_NR_ZONES - 1) | ||
| 376 | zone++; | ||
| 377 | else if (pgdat->pgdat_next) { | ||
| 378 | pgdat = pgdat->pgdat_next; | ||
| 379 | zone = pgdat->node_zones; | ||
| 380 | } else | ||
| 381 | zone = NULL; | ||
| 382 | |||
| 383 | return zone; | ||
| 384 | } | ||
| 385 | |||
| 386 | /** | ||
| 387 | * for_each_zone - helper macro to iterate over all memory zones | ||
| 388 | * @zone - pointer to struct zone variable | ||
| 389 | * | ||
| 390 | * The user only needs to declare the zone variable, for_each_zone | ||
| 391 | * fills it in. This basically means for_each_zone() is an | ||
| 392 | * easier to read version of this piece of code: | ||
| 393 | * | ||
| 394 | * for (pgdat = pgdat_list; pgdat; pgdat = pgdat->node_next) | ||
| 395 | * for (i = 0; i < MAX_NR_ZONES; ++i) { | ||
| 396 | * struct zone * z = pgdat->node_zones + i; | ||
| 397 | * ... | ||
| 398 | * } | ||
| 399 | * } | ||
| 400 | */ | ||
| 401 | #define for_each_zone(zone) \ | ||
| 402 | for (zone = pgdat_list->node_zones; zone; zone = next_zone(zone)) | ||
| 403 | |||
| 404 | static inline int populated_zone(struct zone *zone) | 350 | static inline int populated_zone(struct zone *zone) |
| 405 | { | 351 | { |
| 406 | return (!!zone->present_pages); | 352 | return (!!zone->present_pages); |
| @@ -472,6 +418,30 @@ extern struct pglist_data contig_page_data; | |||
| 472 | 418 | ||
| 473 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ | 419 | #endif /* !CONFIG_NEED_MULTIPLE_NODES */ |
| 474 | 420 | ||
| 421 | extern struct pglist_data *first_online_pgdat(void); | ||
| 422 | extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat); | ||
| 423 | extern struct zone *next_zone(struct zone *zone); | ||
| 424 | |||
| 425 | /** | ||
| 426 | * for_each_pgdat - helper macro to iterate over all nodes | ||
| 427 | * @pgdat - pointer to a pg_data_t variable | ||
| 428 | */ | ||
| 429 | #define for_each_online_pgdat(pgdat) \ | ||
| 430 | for (pgdat = first_online_pgdat(); \ | ||
| 431 | pgdat; \ | ||
| 432 | pgdat = next_online_pgdat(pgdat)) | ||
| 433 | /** | ||
| 434 | * for_each_zone - helper macro to iterate over all memory zones | ||
| 435 | * @zone - pointer to struct zone variable | ||
| 436 | * | ||
| 437 | * The user only needs to declare the zone variable, for_each_zone | ||
| 438 | * fills it in. | ||
| 439 | */ | ||
| 440 | #define for_each_zone(zone) \ | ||
| 441 | for (zone = (first_online_pgdat())->node_zones; \ | ||
| 442 | zone; \ | ||
| 443 | zone = next_zone(zone)) | ||
| 444 | |||
| 475 | #ifdef CONFIG_SPARSEMEM | 445 | #ifdef CONFIG_SPARSEMEM |
| 476 | #include <asm/sparsemem.h> | 446 | #include <asm/sparsemem.h> |
| 477 | #endif | 447 | #endif |
| @@ -602,17 +572,6 @@ static inline struct mem_section *__pfn_to_section(unsigned long pfn) | |||
| 602 | return __nr_to_section(pfn_to_section_nr(pfn)); | 572 | return __nr_to_section(pfn_to_section_nr(pfn)); |
| 603 | } | 573 | } |
| 604 | 574 | ||
| 605 | #define pfn_to_page(pfn) \ | ||
| 606 | ({ \ | ||
| 607 | unsigned long __pfn = (pfn); \ | ||
| 608 | __section_mem_map_addr(__pfn_to_section(__pfn)) + __pfn; \ | ||
| 609 | }) | ||
| 610 | #define page_to_pfn(page) \ | ||
| 611 | ({ \ | ||
| 612 | page - __section_mem_map_addr(__nr_to_section( \ | ||
| 613 | page_to_section(page))); \ | ||
| 614 | }) | ||
| 615 | |||
| 616 | static inline int pfn_valid(unsigned long pfn) | 575 | static inline int pfn_valid(unsigned long pfn) |
| 617 | { | 576 | { |
| 618 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) | 577 | if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) |
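The exported pgdat_list pointer and its open-coded next-pointer walk are gone; node iteration now goes through first_online_pgdat()/next_online_pgdat(), normally via the macros above, and for_each_zone() works the same way for zones. A hedged sketch of a caller after the conversion:

#include <linux/mmzone.h>

static unsigned long count_present_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long pages = 0;

	/* was: for (pgdat = pgdat_list; pgdat; pgdat = pgdat->pgdat_next) */
	for_each_online_pgdat(pgdat)
		pages += pgdat->node_present_pages;

	return pages;
}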
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h index f32d75c4f4cf..d54d7b278e96 100644 --- a/include/linux/netfilter_ipv4/ip_conntrack.h +++ b/include/linux/netfilter_ipv4/ip_conntrack.h | |||
| @@ -308,29 +308,30 @@ DECLARE_PER_CPU(struct ip_conntrack_ecache, ip_conntrack_ecache); | |||
| 308 | 308 | ||
| 309 | #define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x) | 309 | #define CONNTRACK_ECACHE(x) (__get_cpu_var(ip_conntrack_ecache).x) |
| 310 | 310 | ||
| 311 | extern struct notifier_block *ip_conntrack_chain; | 311 | extern struct atomic_notifier_head ip_conntrack_chain; |
| 312 | extern struct notifier_block *ip_conntrack_expect_chain; | 312 | extern struct atomic_notifier_head ip_conntrack_expect_chain; |
| 313 | 313 | ||
| 314 | static inline int ip_conntrack_register_notifier(struct notifier_block *nb) | 314 | static inline int ip_conntrack_register_notifier(struct notifier_block *nb) |
| 315 | { | 315 | { |
| 316 | return notifier_chain_register(&ip_conntrack_chain, nb); | 316 | return atomic_notifier_chain_register(&ip_conntrack_chain, nb); |
| 317 | } | 317 | } |
| 318 | 318 | ||
| 319 | static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb) | 319 | static inline int ip_conntrack_unregister_notifier(struct notifier_block *nb) |
| 320 | { | 320 | { |
| 321 | return notifier_chain_unregister(&ip_conntrack_chain, nb); | 321 | return atomic_notifier_chain_unregister(&ip_conntrack_chain, nb); |
| 322 | } | 322 | } |
| 323 | 323 | ||
| 324 | static inline int | 324 | static inline int |
| 325 | ip_conntrack_expect_register_notifier(struct notifier_block *nb) | 325 | ip_conntrack_expect_register_notifier(struct notifier_block *nb) |
| 326 | { | 326 | { |
| 327 | return notifier_chain_register(&ip_conntrack_expect_chain, nb); | 327 | return atomic_notifier_chain_register(&ip_conntrack_expect_chain, nb); |
| 328 | } | 328 | } |
| 329 | 329 | ||
| 330 | static inline int | 330 | static inline int |
| 331 | ip_conntrack_expect_unregister_notifier(struct notifier_block *nb) | 331 | ip_conntrack_expect_unregister_notifier(struct notifier_block *nb) |
| 332 | { | 332 | { |
| 333 | return notifier_chain_unregister(&ip_conntrack_expect_chain, nb); | 333 | return atomic_notifier_chain_unregister(&ip_conntrack_expect_chain, |
| 334 | nb); | ||
| 334 | } | 335 | } |
| 335 | 336 | ||
| 336 | extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct); | 337 | extern void ip_ct_deliver_cached_events(const struct ip_conntrack *ct); |
| @@ -355,14 +356,14 @@ static inline void ip_conntrack_event(enum ip_conntrack_events event, | |||
| 355 | struct ip_conntrack *ct) | 356 | struct ip_conntrack *ct) |
| 356 | { | 357 | { |
| 357 | if (is_confirmed(ct) && !is_dying(ct)) | 358 | if (is_confirmed(ct) && !is_dying(ct)) |
| 358 | notifier_call_chain(&ip_conntrack_chain, event, ct); | 359 | atomic_notifier_call_chain(&ip_conntrack_chain, event, ct); |
| 359 | } | 360 | } |
| 360 | 361 | ||
| 361 | static inline void | 362 | static inline void |
| 362 | ip_conntrack_expect_event(enum ip_conntrack_expect_events event, | 363 | ip_conntrack_expect_event(enum ip_conntrack_expect_events event, |
| 363 | struct ip_conntrack_expect *exp) | 364 | struct ip_conntrack_expect *exp) |
| 364 | { | 365 | { |
| 365 | notifier_call_chain(&ip_conntrack_expect_chain, event, exp); | 366 | atomic_notifier_call_chain(&ip_conntrack_expect_chain, event, exp); |
| 366 | } | 367 | } |
| 367 | #else /* CONFIG_IP_NF_CONNTRACK_EVENTS */ | 368 | #else /* CONFIG_IP_NF_CONNTRACK_EVENTS */ |
| 368 | static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, | 369 | static inline void ip_conntrack_event_cache(enum ip_conntrack_events event, |
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h index 6bad4766d3d9..d2a8abb5011a 100644 --- a/include/linux/nfsd/export.h +++ b/include/linux/nfsd/export.h | |||
| @@ -67,7 +67,8 @@ struct svc_expkey { | |||
| 67 | int ek_fsidtype; | 67 | int ek_fsidtype; |
| 68 | u32 ek_fsid[3]; | 68 | u32 ek_fsid[3]; |
| 69 | 69 | ||
| 70 | struct svc_export * ek_export; | 70 | struct vfsmount * ek_mnt; |
| 71 | struct dentry * ek_dentry; | ||
| 71 | }; | 72 | }; |
| 72 | 73 | ||
| 73 | #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) | 74 | #define EX_SECURE(exp) (!((exp)->ex_flags & NFSEXP_INSECURE_PORT)) |
| @@ -85,9 +86,6 @@ void nfsd_export_shutdown(void); | |||
| 85 | void nfsd_export_flush(void); | 86 | void nfsd_export_flush(void); |
| 86 | void exp_readlock(void); | 87 | void exp_readlock(void); |
| 87 | void exp_readunlock(void); | 88 | void exp_readunlock(void); |
| 88 | struct svc_expkey * exp_find_key(struct auth_domain *clp, | ||
| 89 | int fsid_type, u32 *fsidv, | ||
| 90 | struct cache_req *reqp); | ||
| 91 | struct svc_export * exp_get_by_name(struct auth_domain *clp, | 89 | struct svc_export * exp_get_by_name(struct auth_domain *clp, |
| 92 | struct vfsmount *mnt, | 90 | struct vfsmount *mnt, |
| 93 | struct dentry *dentry, | 91 | struct dentry *dentry, |
| @@ -101,35 +99,20 @@ int exp_rootfh(struct auth_domain *, | |||
| 101 | int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq); | 99 | int exp_pseudoroot(struct auth_domain *, struct svc_fh *fhp, struct cache_req *creq); |
| 102 | int nfserrno(int errno); | 100 | int nfserrno(int errno); |
| 103 | 101 | ||
| 104 | extern void expkey_put(struct cache_head *item, struct cache_detail *cd); | 102 | extern struct cache_detail svc_export_cache; |
| 105 | extern void svc_export_put(struct cache_head *item, struct cache_detail *cd); | ||
| 106 | extern struct cache_detail svc_export_cache, svc_expkey_cache; | ||
| 107 | 103 | ||
| 108 | static inline void exp_put(struct svc_export *exp) | 104 | static inline void exp_put(struct svc_export *exp) |
| 109 | { | 105 | { |
| 110 | svc_export_put(&exp->h, &svc_export_cache); | 106 | cache_put(&exp->h, &svc_export_cache); |
| 111 | } | 107 | } |
| 112 | 108 | ||
| 113 | static inline void exp_get(struct svc_export *exp) | 109 | static inline void exp_get(struct svc_export *exp) |
| 114 | { | 110 | { |
| 115 | cache_get(&exp->h); | 111 | cache_get(&exp->h); |
| 116 | } | 112 | } |
| 117 | static inline struct svc_export * | 113 | extern struct svc_export * |
| 118 | exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, | 114 | exp_find(struct auth_domain *clp, int fsid_type, u32 *fsidv, |
| 119 | struct cache_req *reqp) | 115 | struct cache_req *reqp); |
| 120 | { | ||
| 121 | struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); | ||
| 122 | if (ek && !IS_ERR(ek)) { | ||
| 123 | struct svc_export *exp = ek->ek_export; | ||
| 124 | int err; | ||
| 125 | exp_get(exp); | ||
| 126 | expkey_put(&ek->h, &svc_expkey_cache); | ||
| 127 | if ((err = cache_check(&svc_export_cache, &exp->h, reqp))) | ||
| 128 | exp = ERR_PTR(err); | ||
| 129 | return exp; | ||
| 130 | } else | ||
| 131 | return ERR_PTR(PTR_ERR(ek)); | ||
| 132 | } | ||
| 133 | 116 | ||
| 134 | #endif /* __KERNEL__ */ | 117 | #endif /* __KERNEL__ */ |
| 135 | 118 | ||
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index b959a4525cbd..1a9ef3e627d1 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
| @@ -350,11 +350,15 @@ extern nodemask_t node_possible_map; | |||
| 350 | #define num_possible_nodes() nodes_weight(node_possible_map) | 350 | #define num_possible_nodes() nodes_weight(node_possible_map) |
| 351 | #define node_online(node) node_isset((node), node_online_map) | 351 | #define node_online(node) node_isset((node), node_online_map) |
| 352 | #define node_possible(node) node_isset((node), node_possible_map) | 352 | #define node_possible(node) node_isset((node), node_possible_map) |
| 353 | #define first_online_node first_node(node_online_map) | ||
| 354 | #define next_online_node(nid) next_node((nid), node_online_map) | ||
| 353 | #else | 355 | #else |
| 354 | #define num_online_nodes() 1 | 356 | #define num_online_nodes() 1 |
| 355 | #define num_possible_nodes() 1 | 357 | #define num_possible_nodes() 1 |
| 356 | #define node_online(node) ((node) == 0) | 358 | #define node_online(node) ((node) == 0) |
| 357 | #define node_possible(node) ((node) == 0) | 359 | #define node_possible(node) ((node) == 0) |
| 360 | #define first_online_node 0 | ||
| 361 | #define next_online_node(nid) (MAX_NUMNODES) | ||
| 358 | #endif | 362 | #endif |
| 359 | 363 | ||
| 360 | #define any_online_node(mask) \ | 364 | #define any_online_node(mask) \ |
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 5937dd6053c3..51dbab9710c7 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
| @@ -10,25 +10,107 @@ | |||
| 10 | #ifndef _LINUX_NOTIFIER_H | 10 | #ifndef _LINUX_NOTIFIER_H |
| 11 | #define _LINUX_NOTIFIER_H | 11 | #define _LINUX_NOTIFIER_H |
| 12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
| 13 | #include <linux/mutex.h> | ||
| 14 | #include <linux/rwsem.h> | ||
| 13 | 15 | ||
| 14 | struct notifier_block | 16 | /* |
| 15 | { | 17 | * Notifier chains are of three types: |
| 16 | int (*notifier_call)(struct notifier_block *self, unsigned long, void *); | 18 | * |
| 19 | * Atomic notifier chains: Chain callbacks run in interrupt/atomic | ||
| 20 | * context. Callouts are not allowed to block. | ||
| 21 | * Blocking notifier chains: Chain callbacks run in process context. | ||
| 22 | * Callouts are allowed to block. | ||
| 23 | * Raw notifier chains: There are no restrictions on callbacks, | ||
| 24 | * registration, or unregistration. All locking and protection | ||
| 25 | * must be provided by the caller. | ||
| 26 | * | ||
| 27 | * atomic_notifier_chain_register() may be called from an atomic context, | ||
| 28 | * but blocking_notifier_chain_register() must be called from a process | ||
| 29 | * context. Ditto for the corresponding _unregister() routines. | ||
| 30 | * | ||
| 31 | * atomic_notifier_chain_unregister() and blocking_notifier_chain_unregister() | ||
| 32 | * _must not_ be called from within the call chain. | ||
| 33 | */ | ||
| 34 | |||
| 35 | struct notifier_block { | ||
| 36 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | ||
| 17 | struct notifier_block *next; | 37 | struct notifier_block *next; |
| 18 | int priority; | 38 | int priority; |
| 19 | }; | 39 | }; |
| 20 | 40 | ||
| 41 | struct atomic_notifier_head { | ||
| 42 | spinlock_t lock; | ||
| 43 | struct notifier_block *head; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct blocking_notifier_head { | ||
| 47 | struct rw_semaphore rwsem; | ||
| 48 | struct notifier_block *head; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct raw_notifier_head { | ||
| 52 | struct notifier_block *head; | ||
| 53 | }; | ||
| 54 | |||
| 55 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ | ||
| 56 | spin_lock_init(&(name)->lock); \ | ||
| 57 | (name)->head = NULL; \ | ||
| 58 | } while (0) | ||
| 59 | #define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \ | ||
| 60 | init_rwsem(&(name)->rwsem); \ | ||
| 61 | (name)->head = NULL; \ | ||
| 62 | } while (0) | ||
| 63 | #define RAW_INIT_NOTIFIER_HEAD(name) do { \ | ||
| 64 | (name)->head = NULL; \ | ||
| 65 | } while (0) | ||
| 66 | |||
| 67 | #define ATOMIC_NOTIFIER_INIT(name) { \ | ||
| 68 | .lock = SPIN_LOCK_UNLOCKED, \ | ||
| 69 | .head = NULL } | ||
| 70 | #define BLOCKING_NOTIFIER_INIT(name) { \ | ||
| 71 | .rwsem = __RWSEM_INITIALIZER((name).rwsem), \ | ||
| 72 | .head = NULL } | ||
| 73 | #define RAW_NOTIFIER_INIT(name) { \ | ||
| 74 | .head = NULL } | ||
| 75 | |||
| 76 | #define ATOMIC_NOTIFIER_HEAD(name) \ | ||
| 77 | struct atomic_notifier_head name = \ | ||
| 78 | ATOMIC_NOTIFIER_INIT(name) | ||
| 79 | #define BLOCKING_NOTIFIER_HEAD(name) \ | ||
| 80 | struct blocking_notifier_head name = \ | ||
| 81 | BLOCKING_NOTIFIER_INIT(name) | ||
| 82 | #define RAW_NOTIFIER_HEAD(name) \ | ||
| 83 | struct raw_notifier_head name = \ | ||
| 84 | RAW_NOTIFIER_INIT(name) | ||
| 21 | 85 | ||
| 22 | #ifdef __KERNEL__ | 86 | #ifdef __KERNEL__ |
| 23 | 87 | ||
| 24 | extern int notifier_chain_register(struct notifier_block **list, struct notifier_block *n); | 88 | extern int atomic_notifier_chain_register(struct atomic_notifier_head *, |
| 25 | extern int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n); | 89 | struct notifier_block *); |
| 26 | extern int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v); | 90 | extern int blocking_notifier_chain_register(struct blocking_notifier_head *, |
| 91 | struct notifier_block *); | ||
| 92 | extern int raw_notifier_chain_register(struct raw_notifier_head *, | ||
| 93 | struct notifier_block *); | ||
| 94 | |||
| 95 | extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *, | ||
| 96 | struct notifier_block *); | ||
| 97 | extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *, | ||
| 98 | struct notifier_block *); | ||
| 99 | extern int raw_notifier_chain_unregister(struct raw_notifier_head *, | ||
| 100 | struct notifier_block *); | ||
| 101 | |||
| 102 | extern int atomic_notifier_call_chain(struct atomic_notifier_head *, | ||
| 103 | unsigned long val, void *v); | ||
| 104 | extern int blocking_notifier_call_chain(struct blocking_notifier_head *, | ||
| 105 | unsigned long val, void *v); | ||
| 106 | extern int raw_notifier_call_chain(struct raw_notifier_head *, | ||
| 107 | unsigned long val, void *v); | ||
| 27 | 108 | ||
| 28 | #define NOTIFY_DONE 0x0000 /* Don't care */ | 109 | #define NOTIFY_DONE 0x0000 /* Don't care */ |
| 29 | #define NOTIFY_OK 0x0001 /* Suits me */ | 110 | #define NOTIFY_OK 0x0001 /* Suits me */ |
| 30 | #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ | 111 | #define NOTIFY_STOP_MASK 0x8000 /* Don't call further */ |
| 31 | #define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) /* Bad/Veto action */ | 112 | #define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002) |
| 113 | /* Bad/Veto action */ | ||
| 32 | /* | 114 | /* |
| 33 | * Clean way to return from the notifier and stop further calls. | 115 | * Clean way to return from the notifier and stop further calls. |
| 34 | */ | 116 | */ |
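Each chain type now has its own head structure and its own register/unregister/call helpers, so a subsystem picks the variant that matches its calling context. A hedged sketch using the blocking variant; the chain, event value and callback are illustrative.

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(my_event_chain);	/* callouts may sleep */

static int my_event_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block my_event_nb = {
	.notifier_call	= my_event_cb,
};

static void my_notifier_example(void *payload)
{
	blocking_notifier_chain_register(&my_event_chain, &my_event_nb);
	blocking_notifier_call_chain(&my_event_chain, 1 /* event */, payload);
	blocking_notifier_chain_unregister(&my_event_chain, &my_event_nb);
}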
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6f080ae59286..02f6cf20b141 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -1052,6 +1052,7 @@ | |||
| 1052 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 | 1052 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6600_ALT2 0x00f2 |
| 1053 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 | 1053 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6200_ALT1 0x00f3 |
| 1054 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 | 1054 | #define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_GT 0x00f9 |
| 1055 | #define PCIE_DEVICE_ID_NVIDIA_QUADRO_NVS280 0x00fd | ||
| 1055 | #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 | 1056 | #define PCI_DEVICE_ID_NVIDIA_GEFORCE_SDR 0x0100 |
| 1056 | #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 | 1057 | #define PCI_DEVICE_ID_NVIDIA_GEFORCE_DDR 0x0101 |
| 1057 | #define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 | 1058 | #define PCI_DEVICE_ID_NVIDIA_QUADRO 0x0103 |
diff --git a/include/linux/pfn.h b/include/linux/pfn.h new file mode 100644 index 000000000000..bb01f8b92b56 --- /dev/null +++ b/include/linux/pfn.h | |||
| @@ -0,0 +1,9 @@ | |||
| 1 | #ifndef _LINUX_PFN_H_ | ||
| 2 | #define _LINUX_PFN_H_ | ||
| 3 | |||
| 4 | #define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) | ||
| 5 | #define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT) | ||
| 6 | #define PFN_DOWN(x) ((x) >> PAGE_SHIFT) | ||
| 7 | #define PFN_PHYS(x) ((x) << PAGE_SHIFT) | ||
| 8 | |||
| 9 | #endif | ||
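A short worked example of the four macros, assuming PAGE_SIZE is 4096 (PAGE_SHIFT 12); the address is arbitrary.

/*
 * PFN_DOWN(0x1a234)  = 0x1a234 >> 12           = 0x1a    (page containing the address)
 * PFN_UP(0x1a234)    = (0x1a234 + 0xfff) >> 12 = 0x1b    (first page fully above it)
 * PFN_PHYS(0x1b)     = 0x1b << 12              = 0x1b000
 * PFN_ALIGN(0x1a234) = 0x1b000                 (rounded up to a page boundary)
 */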
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index aa6322d45198..cb224cf653b1 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
| 5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
| 6 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
| 7 | #include <linux/spinlock.h> | ||
| 7 | #include <asm/atomic.h> | 8 | #include <asm/atomic.h> |
| 8 | 9 | ||
| 9 | /* | 10 | /* |
| @@ -55,7 +56,7 @@ struct proc_dir_entry { | |||
| 55 | nlink_t nlink; | 56 | nlink_t nlink; |
| 56 | uid_t uid; | 57 | uid_t uid; |
| 57 | gid_t gid; | 58 | gid_t gid; |
| 58 | unsigned long size; | 59 | loff_t size; |
| 59 | struct inode_operations * proc_iops; | 60 | struct inode_operations * proc_iops; |
| 60 | struct file_operations * proc_fops; | 61 | struct file_operations * proc_fops; |
| 61 | get_info_t *get_info; | 62 | get_info_t *get_info; |
| @@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus; | |||
| 92 | extern struct proc_dir_entry *proc_root_driver; | 93 | extern struct proc_dir_entry *proc_root_driver; |
| 93 | extern struct proc_dir_entry *proc_root_kcore; | 94 | extern struct proc_dir_entry *proc_root_kcore; |
| 94 | 95 | ||
| 96 | extern spinlock_t proc_subdir_lock; | ||
| 97 | |||
| 95 | extern void proc_root_init(void); | 98 | extern void proc_root_init(void); |
| 96 | extern void proc_misc_init(void); | 99 | extern void proc_misc_init(void); |
| 97 | 100 | ||
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h index b6e0bcad84e1..66b44e5e0d6e 100644 --- a/include/linux/raid/md.h +++ b/include/linux/raid/md.h | |||
| @@ -92,7 +92,10 @@ extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, | |||
| 92 | extern void md_super_wait(mddev_t *mddev); | 92 | extern void md_super_wait(mddev_t *mddev); |
| 93 | extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, | 93 | extern int sync_page_io(struct block_device *bdev, sector_t sector, int size, |
| 94 | struct page *page, int rw); | 94 | struct page *page, int rw); |
| 95 | extern void md_do_sync(mddev_t *mddev); | ||
| 96 | extern void md_new_event(mddev_t *mddev); | ||
| 95 | 97 | ||
| 98 | extern void md_update_sb(mddev_t * mddev); | ||
| 96 | 99 | ||
| 97 | #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } | 100 | #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } |
| 98 | 101 | ||
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h index 617b9506c760..e2df61f5b09a 100644 --- a/include/linux/raid/md_k.h +++ b/include/linux/raid/md_k.h | |||
| @@ -132,6 +132,14 @@ struct mddev_s | |||
| 132 | 132 | ||
| 133 | char uuid[16]; | 133 | char uuid[16]; |
| 134 | 134 | ||
| 135 | /* If the array is being reshaped, we need to record the | ||
| 136 | * new shape and an indication of where we are up to. | ||
| 137 | * This is written to the superblock. | ||
| 138 | * If reshape_position is MaxSector, then no reshape is happening (yet). | ||
| 139 | */ | ||
| 140 | sector_t reshape_position; | ||
| 141 | int delta_disks, new_level, new_layout, new_chunk; | ||
| 142 | |||
| 135 | struct mdk_thread_s *thread; /* management thread */ | 143 | struct mdk_thread_s *thread; /* management thread */ |
| 136 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ | 144 | struct mdk_thread_s *sync_thread; /* doing resync or reconstruct */ |
| 137 | sector_t curr_resync; /* blocks scheduled */ | 145 | sector_t curr_resync; /* blocks scheduled */ |
| @@ -143,6 +151,10 @@ struct mddev_s | |||
| 143 | sector_t resync_mismatches; /* count of sectors where | 151 | sector_t resync_mismatches; /* count of sectors where |
| 144 | * parity/replica mismatch found | 152 | * parity/replica mismatch found |
| 145 | */ | 153 | */ |
| 154 | |||
| 155 | /* allow user-space to request suspension of IO to regions of the array */ | ||
| 156 | sector_t suspend_lo; | ||
| 157 | sector_t suspend_hi; | ||
| 146 | /* if zero, use the system-wide default */ | 158 | /* if zero, use the system-wide default */ |
| 147 | int sync_speed_min; | 159 | int sync_speed_min; |
| 148 | int sync_speed_max; | 160 | int sync_speed_max; |
| @@ -157,6 +169,9 @@ struct mddev_s | |||
| 157 | * DONE: thread is done and is waiting to be reaped | 169 | * DONE: thread is done and is waiting to be reaped |
| 158 | * REQUEST: user-space has requested a sync (used with SYNC) | 170 | * REQUEST: user-space has requested a sync (used with SYNC) |
| 159 | * CHECK: user-space request for check-only, no repair | 171 | * CHECK: user-space request for check-only, no repair |
| 172 | * RESHAPE: A reshape is happening | ||
| 173 | * | ||
| 174 | * If neither SYNC nor RESHAPE is set, then it is a recovery. | ||
| 160 | */ | 175 | */ |
| 161 | #define MD_RECOVERY_RUNNING 0 | 176 | #define MD_RECOVERY_RUNNING 0 |
| 162 | #define MD_RECOVERY_SYNC 1 | 177 | #define MD_RECOVERY_SYNC 1 |
| @@ -166,10 +181,11 @@ struct mddev_s | |||
| 166 | #define MD_RECOVERY_NEEDED 5 | 181 | #define MD_RECOVERY_NEEDED 5 |
| 167 | #define MD_RECOVERY_REQUESTED 6 | 182 | #define MD_RECOVERY_REQUESTED 6 |
| 168 | #define MD_RECOVERY_CHECK 7 | 183 | #define MD_RECOVERY_CHECK 7 |
| 184 | #define MD_RECOVERY_RESHAPE 8 | ||
| 169 | unsigned long recovery; | 185 | unsigned long recovery; |
| 170 | 186 | ||
| 171 | int in_sync; /* know to not need resync */ | 187 | int in_sync; /* know to not need resync */ |
| 172 | struct semaphore reconfig_sem; | 188 | struct mutex reconfig_mutex; |
| 173 | atomic_t active; | 189 | atomic_t active; |
| 174 | 190 | ||
| 175 | int changed; /* true if we might need to reread partition info */ | 191 | int changed; /* true if we might need to reread partition info */ |
| @@ -249,7 +265,8 @@ struct mdk_personality | |||
| 249 | int (*spare_active) (mddev_t *mddev); | 265 | int (*spare_active) (mddev_t *mddev); |
| 250 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); | 266 | sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster); |
| 251 | int (*resize) (mddev_t *mddev, sector_t sectors); | 267 | int (*resize) (mddev_t *mddev, sector_t sectors); |
| 252 | int (*reshape) (mddev_t *mddev, int raid_disks); | 268 | int (*check_reshape) (mddev_t *mddev); |
| 269 | int (*start_reshape) (mddev_t *mddev); | ||
| 253 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); | 270 | int (*reconfig) (mddev_t *mddev, int layout, int chunk_size); |
| 254 | /* quiesce moves between quiescence states | 271 | /* quiesce moves between quiescence states |
| 255 | * 0 - fully active | 272 | * 0 - fully active |
diff --git a/include/linux/raid/md_p.h b/include/linux/raid/md_p.h index c100fa5d4bfa..774e1acfb8c4 100644 --- a/include/linux/raid/md_p.h +++ b/include/linux/raid/md_p.h | |||
| @@ -102,6 +102,18 @@ typedef struct mdp_device_descriptor_s { | |||
| 102 | #define MD_SB_ERRORS 1 | 102 | #define MD_SB_ERRORS 1 |
| 103 | 103 | ||
| 104 | #define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ | 104 | #define MD_SB_BITMAP_PRESENT 8 /* bitmap may be present nearby */ |
| 105 | |||
| 106 | /* | ||
| 107 | * Notes: | ||
| 108 | * - if an array is being reshaped (restriped) in order to change | ||
| 109 | * the number of active devices in the array, 'raid_disks' will be | ||
| 110 | * the larger of the old and new numbers. 'delta_disks' will | ||
| 111 | * be the "new - old". So if +ve, raid_disks is the new value, and | ||
| 112 | * "raid_disks-delta_disks" is the old. If -ve, raid_disks is the | ||
| 113 | * old value and "raid_disks+delta_disks" is the new (smaller) value. | ||
| 114 | */ | ||
| 115 | |||
| 116 | |||
| 105 | typedef struct mdp_superblock_s { | 117 | typedef struct mdp_superblock_s { |
| 106 | /* | 118 | /* |
| 107 | * Constant generic information | 119 | * Constant generic information |
| @@ -146,7 +158,13 @@ typedef struct mdp_superblock_s { | |||
| 146 | __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ | 158 | __u32 cp_events_hi; /* 10 high-order of checkpoint update count */ |
| 147 | #endif | 159 | #endif |
| 148 | __u32 recovery_cp; /* 11 recovery checkpoint sector count */ | 160 | __u32 recovery_cp; /* 11 recovery checkpoint sector count */ |
| 149 | __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 12]; | 161 | /* These are only valid for minor_version > 90 */ |
| 162 | __u64 reshape_position; /* 12,13 next address in array-space for reshape */ | ||
| 163 | __u32 new_level; /* 14 new level we are reshaping to */ | ||
| 164 | __u32 delta_disks; /* 15 change in number of raid_disks */ | ||
| 165 | __u32 new_layout; /* 16 new layout */ | ||
| 166 | __u32 new_chunk; /* 17 new chunk size (bytes) */ | ||
| 167 | __u32 gstate_sreserved[MD_SB_GENERIC_STATE_WORDS - 18]; | ||
| 150 | 168 | ||
| 151 | /* | 169 | /* |
| 152 | * Personality information | 170 | * Personality information |
| @@ -207,7 +225,14 @@ struct mdp_superblock_1 { | |||
| 207 | * NOTE: signed, so bitmap can be before superblock | 225 | * NOTE: signed, so bitmap can be before superblock |
| 208 | * only meaningful if feature_map[0] is set. | 226 | * only meaningful if feature_map[0] is set. |
| 209 | */ | 227 | */ |
| 210 | __u8 pad1[128-100]; /* set to 0 when written */ | 228 | |
| 229 | /* These are only valid with feature bit '4' */ | ||
| 230 | __u64 reshape_position; /* next address in array-space for reshape */ | ||
| 231 | __u32 new_level; /* new level we are reshaping to */ | ||
| 232 | __u32 delta_disks; /* change in number of raid_disks */ | ||
| 233 | __u32 new_layout; /* new layout */ | ||
| 234 | __u32 new_chunk; /* new chunk size (bytes) */ | ||
| 235 | __u8 pad1[128-124]; /* set to 0 when written */ | ||
| 211 | 236 | ||
| 212 | /* constant this-device information - 64 bytes */ | 237 | /* constant this-device information - 64 bytes */ |
| 213 | __u64 data_offset; /* sector start of data, often 0 */ | 238 | __u64 data_offset; /* sector start of data, often 0 */ |
| @@ -240,8 +265,9 @@ struct mdp_superblock_1 { | |||
| 240 | 265 | ||
| 241 | /* feature_map bits */ | 266 | /* feature_map bits */ |
| 242 | #define MD_FEATURE_BITMAP_OFFSET 1 | 267 | #define MD_FEATURE_BITMAP_OFFSET 1 |
| 268 | #define MD_FEATURE_RESHAPE_ACTIVE 4 | ||
| 243 | 269 | ||
| 244 | #define MD_FEATURE_ALL 1 | 270 | #define MD_FEATURE_ALL 5 |
| 245 | 271 | ||
| 246 | #endif | 272 | #endif |
| 247 | 273 | ||
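The reshape fields in both superblock formats are only meaningful when flagged: the MD_FEATURE_RESHAPE_ACTIVE bit for the v1 format, minor_version above 90 for v0.90. A hedged sketch of how a v1 superblock reader might consume them into the new mddev fields; the function name is illustrative, and the md driver's own loader does the equivalent.

#include <linux/raid/md.h>

static void read_reshape_info(struct mdp_superblock_1 *sb, mddev_t *mddev)
{
	if (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) {
		mddev->reshape_position	= le64_to_cpu(sb->reshape_position);
		mddev->delta_disks	= le32_to_cpu(sb->delta_disks);
		mddev->new_level	= le32_to_cpu(sb->new_level);
		mddev->new_layout	= le32_to_cpu(sb->new_layout);
		mddev->new_chunk	= le32_to_cpu(sb->new_chunk);
	} else {
		mddev->reshape_position	= MaxSector;	/* no reshape in progress */
	}
}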
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h index 394da8207b34..914af667044f 100644 --- a/include/linux/raid/raid5.h +++ b/include/linux/raid/raid5.h | |||
| @@ -135,6 +135,7 @@ struct stripe_head { | |||
| 135 | atomic_t count; /* nr of active thread/requests */ | 135 | atomic_t count; /* nr of active thread/requests */ |
| 136 | spinlock_t lock; | 136 | spinlock_t lock; |
| 137 | int bm_seq; /* sequence number for bitmap flushes */ | 137 | int bm_seq; /* sequence number for bitmap flushes */ |
| 138 | int disks; /* disks in stripe */ | ||
| 138 | struct r5dev { | 139 | struct r5dev { |
| 139 | struct bio req; | 140 | struct bio req; |
| 140 | struct bio_vec vec; | 141 | struct bio_vec vec; |
| @@ -156,6 +157,7 @@ struct stripe_head { | |||
| 156 | #define R5_ReadError 8 /* seen a read error here recently */ | 157 | #define R5_ReadError 8 /* seen a read error here recently */ |
| 157 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ | 158 | #define R5_ReWrite 9 /* have tried to over-write the readerror */ |
| 158 | 159 | ||
| 160 | #define R5_Expanded 10 /* This block now has post-expand data */ | ||
| 159 | /* | 161 | /* |
| 160 | * Write method | 162 | * Write method |
| 161 | */ | 163 | */ |
| @@ -174,7 +176,9 @@ struct stripe_head { | |||
| 174 | #define STRIPE_DELAYED 6 | 176 | #define STRIPE_DELAYED 6 |
| 175 | #define STRIPE_DEGRADED 7 | 177 | #define STRIPE_DEGRADED 7 |
| 176 | #define STRIPE_BIT_DELAY 8 | 178 | #define STRIPE_BIT_DELAY 8 |
| 177 | 179 | #define STRIPE_EXPANDING 9 | |
| 180 | #define STRIPE_EXPAND_SOURCE 10 | ||
| 181 | #define STRIPE_EXPAND_READY 11 | ||
| 178 | /* | 182 | /* |
| 179 | * Plugging: | 183 | * Plugging: |
| 180 | * | 184 | * |
| @@ -211,12 +215,24 @@ struct raid5_private_data { | |||
| 211 | int raid_disks, working_disks, failed_disks; | 215 | int raid_disks, working_disks, failed_disks; |
| 212 | int max_nr_stripes; | 216 | int max_nr_stripes; |
| 213 | 217 | ||
| 218 | /* used during an expand */ | ||
| 219 | sector_t expand_progress; /* MaxSector when no expand happening */ | ||
| 220 | sector_t expand_lo; /* from here up to expand_progress it is out-of-bounds | ||
| 221 | * as we haven't flushed the metadata yet | ||
| 222 | */ | ||
| 223 | int previous_raid_disks; | ||
| 224 | |||
| 214 | struct list_head handle_list; /* stripes needing handling */ | 225 | struct list_head handle_list; /* stripes needing handling */ |
| 215 | struct list_head delayed_list; /* stripes that have plugged requests */ | 226 | struct list_head delayed_list; /* stripes that have plugged requests */ |
| 216 | struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ | 227 | struct list_head bitmap_list; /* stripes delaying awaiting bitmap update */ |
| 217 | atomic_t preread_active_stripes; /* stripes with scheduled io */ | 228 | atomic_t preread_active_stripes; /* stripes with scheduled io */ |
| 218 | 229 | ||
| 219 | char cache_name[20]; | 230 | atomic_t reshape_stripes; /* stripes with pending writes for reshape */ |
| 231 | /* unfortunately we need two cache names as we temporarily have | ||
| 232 | * two caches. | ||
| 233 | */ | ||
| 234 | int active_name; | ||
| 235 | char cache_name[2][20]; | ||
| 220 | kmem_cache_t *slab_cache; /* for allocating stripes */ | 236 | kmem_cache_t *slab_cache; /* for allocating stripes */ |
| 221 | 237 | ||
| 222 | int seq_flush, seq_write; | 238 | int seq_flush, seq_write; |
| @@ -238,9 +254,10 @@ struct raid5_private_data { | |||
| 238 | wait_queue_head_t wait_for_overlap; | 254 | wait_queue_head_t wait_for_overlap; |
| 239 | int inactive_blocked; /* release of inactive stripes blocked, | 255 | int inactive_blocked; /* release of inactive stripes blocked, |
| 240 | * waiting for 25% to be free | 256 | * waiting for 25% to be free |
| 241 | */ | 257 | */ |
| 258 | int pool_size; /* number of disks in stripeheads in pool */ | ||
| 242 | spinlock_t device_lock; | 259 | spinlock_t device_lock; |
| 243 | struct disk_info disks[0]; | 260 | struct disk_info *disks; |
| 244 | }; | 261 | }; |
| 245 | 262 | ||
| 246 | typedef struct raid5_private_data raid5_conf_t; | 263 | typedef struct raid5_private_data raid5_conf_t; |
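STRIPE_EXPANDING, STRIPE_EXPAND_SOURCE and STRIPE_EXPAND_READY are bit numbers for the stripe_head state word, like the existing STRIPE_* flags. A hedged sketch of how such bits are tested and set, assuming the pre-existing 'unsigned long state' member of struct stripe_head (unchanged by this hunk and therefore not shown); the helper names are illustrative, not part of the patch:

/* Illustrative only: wrappers around the new expansion state bits. */
static inline int stripe_is_expanding(struct stripe_head *sh)
{
        return test_bit(STRIPE_EXPANDING, &sh->state);
}

static inline void stripe_mark_expand_source(struct stripe_head *sh)
{
        set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
}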
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index b739ac1f7ca0..ab61cd1199f2 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
| @@ -91,10 +91,102 @@ struct rtc_pll_info { | |||
| 91 | #define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ | 91 | #define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ |
| 92 | #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ | 92 | #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ |
| 93 | 93 | ||
| 94 | /* interrupt flags */ | ||
| 95 | #define RTC_IRQF 0x80 /* any of the following is active */ | ||
| 96 | #define RTC_PF 0x40 | ||
| 97 | #define RTC_AF 0x20 | ||
| 98 | #define RTC_UF 0x10 | ||
| 99 | |||
| 94 | #ifdef __KERNEL__ | 100 | #ifdef __KERNEL__ |
| 95 | 101 | ||
| 96 | #include <linux/interrupt.h> | 102 | #include <linux/interrupt.h> |
| 97 | 103 | ||
| 104 | extern int rtc_month_days(unsigned int month, unsigned int year); | ||
| 105 | extern int rtc_valid_tm(struct rtc_time *tm); | ||
| 106 | extern int rtc_tm_to_time(struct rtc_time *tm, unsigned long *time); | ||
| 107 | extern void rtc_time_to_tm(unsigned long time, struct rtc_time *tm); | ||
| 108 | |||
| 109 | #include <linux/device.h> | ||
| 110 | #include <linux/seq_file.h> | ||
| 111 | #include <linux/cdev.h> | ||
| 112 | #include <linux/poll.h> | ||
| 113 | #include <linux/mutex.h> | ||
| 114 | |||
| 115 | extern struct class *rtc_class; | ||
| 116 | |||
| 117 | struct rtc_class_ops { | ||
| 118 | int (*open)(struct device *); | ||
| 119 | void (*release)(struct device *); | ||
| 120 | int (*ioctl)(struct device *, unsigned int, unsigned long); | ||
| 121 | int (*read_time)(struct device *, struct rtc_time *); | ||
| 122 | int (*set_time)(struct device *, struct rtc_time *); | ||
| 123 | int (*read_alarm)(struct device *, struct rtc_wkalrm *); | ||
| 124 | int (*set_alarm)(struct device *, struct rtc_wkalrm *); | ||
| 125 | int (*proc)(struct device *, struct seq_file *); | ||
| 126 | int (*set_mmss)(struct device *, unsigned long secs); | ||
| 127 | int (*irq_set_state)(struct device *, int enabled); | ||
| 128 | int (*irq_set_freq)(struct device *, int freq); | ||
| 129 | int (*read_callback)(struct device *, int data); | ||
| 130 | }; | ||
| 131 | |||
| 132 | #define RTC_DEVICE_NAME_SIZE 20 | ||
| 133 | struct rtc_task; | ||
| 134 | |||
| 135 | struct rtc_device | ||
| 136 | { | ||
| 137 | struct class_device class_dev; | ||
| 138 | struct module *owner; | ||
| 139 | |||
| 140 | int id; | ||
| 141 | char name[RTC_DEVICE_NAME_SIZE]; | ||
| 142 | |||
| 143 | struct rtc_class_ops *ops; | ||
| 144 | struct mutex ops_lock; | ||
| 145 | |||
| 146 | struct class_device *rtc_dev; | ||
| 147 | struct cdev char_dev; | ||
| 148 | struct mutex char_lock; | ||
| 149 | |||
| 150 | unsigned long irq_data; | ||
| 151 | spinlock_t irq_lock; | ||
| 152 | wait_queue_head_t irq_queue; | ||
| 153 | struct fasync_struct *async_queue; | ||
| 154 | |||
| 155 | struct rtc_task *irq_task; | ||
| 156 | spinlock_t irq_task_lock; | ||
| 157 | int irq_freq; | ||
| 158 | }; | ||
| 159 | #define to_rtc_device(d) container_of(d, struct rtc_device, class_dev) | ||
| 160 | |||
| 161 | extern struct rtc_device *rtc_device_register(const char *name, | ||
| 162 | struct device *dev, | ||
| 163 | struct rtc_class_ops *ops, | ||
| 164 | struct module *owner); | ||
| 165 | extern void rtc_device_unregister(struct rtc_device *rdev); | ||
| 166 | extern int rtc_interface_register(struct class_interface *intf); | ||
| 167 | |||
| 168 | extern int rtc_read_time(struct class_device *class_dev, struct rtc_time *tm); | ||
| 169 | extern int rtc_set_time(struct class_device *class_dev, struct rtc_time *tm); | ||
| 170 | extern int rtc_set_mmss(struct class_device *class_dev, unsigned long secs); | ||
| 171 | extern int rtc_read_alarm(struct class_device *class_dev, | ||
| 172 | struct rtc_wkalrm *alrm); | ||
| 173 | extern int rtc_set_alarm(struct class_device *class_dev, | ||
| 174 | struct rtc_wkalrm *alrm); | ||
| 175 | extern void rtc_update_irq(struct class_device *class_dev, | ||
| 176 | unsigned long num, unsigned long events); | ||
| 177 | |||
| 178 | extern struct class_device *rtc_class_open(char *name); | ||
| 179 | extern void rtc_class_close(struct class_device *class_dev); | ||
| 180 | |||
| 181 | extern int rtc_irq_register(struct class_device *class_dev, | ||
| 182 | struct rtc_task *task); | ||
| 183 | extern void rtc_irq_unregister(struct class_device *class_dev, | ||
| 184 | struct rtc_task *task); | ||
| 185 | extern int rtc_irq_set_state(struct class_device *class_dev, | ||
| 186 | struct rtc_task *task, int enabled); | ||
| 187 | extern int rtc_irq_set_freq(struct class_device *class_dev, | ||
| 188 | struct rtc_task *task, int freq); | ||
| 189 | |||
| 98 | typedef struct rtc_task { | 190 | typedef struct rtc_task { |
| 99 | void (*func)(void *private_data); | 191 | void (*func)(void *private_data); |
| 100 | void *private_data; | 192 | void *private_data; |
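The block above adds the RTC class framework (rtc_class_ops, rtc_device, rtc_device_register/unregister). A hedged sketch of a minimal driver written against only the API declared above; the "foo" names, the probe/remove functions and the fixed time value are hypothetical, and error handling is pared down:

/* Illustrative only: a skeletal RTC driver using the new class API. */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/rtc.h>

static int foo_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
        rtc_time_to_tm(0, tm);          /* pretend the hardware reports the epoch */
        return 0;
}

static struct rtc_class_ops foo_rtc_ops = {
        .read_time = foo_rtc_read_time,
};

static struct rtc_device *foo_rtc;

static int foo_rtc_probe(struct device *dev)
{
        foo_rtc = rtc_device_register("foo", dev, &foo_rtc_ops, THIS_MODULE);
        return IS_ERR(foo_rtc) ? PTR_ERR(foo_rtc) : 0;
}

static void foo_rtc_remove(struct device *dev)
{
        rtc_device_unregister(foo_rtc);
}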
diff --git a/include/linux/sched.h b/include/linux/sched.h index e0054c1b9a09..20b4f0372e44 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #include <linux/topology.h> | 35 | #include <linux/topology.h> |
| 36 | #include <linux/seccomp.h> | 36 | #include <linux/seccomp.h> |
| 37 | #include <linux/rcupdate.h> | 37 | #include <linux/rcupdate.h> |
| 38 | #include <linux/futex.h> | ||
| 38 | 39 | ||
| 39 | #include <linux/auxvec.h> /* For AT_VECTOR_SIZE */ | 40 | #include <linux/auxvec.h> /* For AT_VECTOR_SIZE */ |
| 40 | 41 | ||
| @@ -402,6 +403,7 @@ struct signal_struct { | |||
| 402 | 403 | ||
| 403 | /* ITIMER_REAL timer for the process */ | 404 | /* ITIMER_REAL timer for the process */ |
| 404 | struct hrtimer real_timer; | 405 | struct hrtimer real_timer; |
| 406 | struct task_struct *tsk; | ||
| 405 | ktime_t it_real_incr; | 407 | ktime_t it_real_incr; |
| 406 | 408 | ||
| 407 | /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ | 409 | /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ |
| @@ -871,6 +873,11 @@ struct task_struct { | |||
| 871 | int cpuset_mems_generation; | 873 | int cpuset_mems_generation; |
| 872 | int cpuset_mem_spread_rotor; | 874 | int cpuset_mem_spread_rotor; |
| 873 | #endif | 875 | #endif |
| 876 | struct robust_list_head __user *robust_list; | ||
| 877 | #ifdef CONFIG_COMPAT | ||
| 878 | struct compat_robust_list_head __user *compat_robust_list; | ||
| 879 | #endif | ||
| 880 | |||
| 874 | atomic_t fs_excl; /* holding fs exclusive resources */ | 881 | atomic_t fs_excl; /* holding fs exclusive resources */ |
| 875 | struct rcu_head rcu; | 882 | struct rcu_head rcu; |
| 876 | }; | 883 | }; |
diff --git a/include/linux/serio.h b/include/linux/serio.h index aa4d6493a034..690aabca8ed0 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h | |||
| @@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio) | |||
| 119 | } | 119 | } |
| 120 | 120 | ||
| 121 | /* | 121 | /* |
| 122 | * Use the following fucntions to manipulate serio's per-port | 122 | * Use the following functions to manipulate serio's per-port |
| 123 | * driver-specific data. | 123 | * driver-specific data. |
| 124 | */ | 124 | */ |
| 125 | static inline void *serio_get_drvdata(struct serio *serio) | 125 | static inline void *serio_get_drvdata(struct serio *serio) |
| @@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data) | |||
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | /* | 135 | /* |
| 136 | * Use the following fucntions to protect critical sections in | 136 | * Use the following functions to protect critical sections in |
| 137 | * driver code from port's interrupt handler | 137 | * driver code from port's interrupt handler |
| 138 | */ | 138 | */ |
| 139 | static inline void serio_pause_rx(struct serio *serio) | 139 | static inline void serio_pause_rx(struct serio *serio) |
| @@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio) | |||
| 147 | } | 147 | } |
| 148 | 148 | ||
| 149 | /* | 149 | /* |
| 150 | * Use the following fucntions to pin serio's driver in process context | 150 | * Use the following functions to pin serio's driver in process context |
| 151 | */ | 151 | */ |
| 152 | static inline int serio_pin_driver(struct serio *serio) | 152 | static inline int serio_pin_driver(struct serio *serio) |
| 153 | { | 153 | { |
diff --git a/include/linux/smp.h b/include/linux/smp.h index d699a16b0cb2..e2fa3ab4afc5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
| @@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void); | |||
| 82 | */ | 82 | */ |
| 83 | #define raw_smp_processor_id() 0 | 83 | #define raw_smp_processor_id() 0 |
| 84 | #define hard_smp_processor_id() 0 | 84 | #define hard_smp_processor_id() 0 |
| 85 | #define smp_call_function(func,info,retry,wait) ({ 0; }) | 85 | static inline int up_smp_call_function(void) |
| 86 | { | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | #define smp_call_function(func,info,retry,wait) (up_smp_call_function()) | ||
| 86 | #define on_each_cpu(func,info,retry,wait) \ | 90 | #define on_each_cpu(func,info,retry,wait) \ |
| 87 | ({ \ | 91 | ({ \ |
| 88 | local_irq_disable(); \ | 92 | local_irq_disable(); \ |
diff --git a/include/linux/stat.h b/include/linux/stat.h index 8ff2a122dfef..8669291352db 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h | |||
| @@ -69,7 +69,7 @@ struct kstat { | |||
| 69 | struct timespec mtime; | 69 | struct timespec mtime; |
| 70 | struct timespec ctime; | 70 | struct timespec ctime; |
| 71 | unsigned long blksize; | 71 | unsigned long blksize; |
| 72 | unsigned long blocks; | 72 | unsigned long long blocks; |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | #endif | 75 | #endif |
diff --git a/include/linux/statfs.h b/include/linux/statfs.h index ad83a2bdb821..b34cc829f98d 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h | |||
| @@ -8,11 +8,11 @@ | |||
| 8 | struct kstatfs { | 8 | struct kstatfs { |
| 9 | long f_type; | 9 | long f_type; |
| 10 | long f_bsize; | 10 | long f_bsize; |
| 11 | sector_t f_blocks; | 11 | u64 f_blocks; |
| 12 | sector_t f_bfree; | 12 | u64 f_bfree; |
| 13 | sector_t f_bavail; | 13 | u64 f_bavail; |
| 14 | sector_t f_files; | 14 | u64 f_files; |
| 15 | sector_t f_ffree; | 15 | u64 f_ffree; |
| 16 | __kernel_fsid_t f_fsid; | 16 | __kernel_fsid_t f_fsid; |
| 17 | long f_namelen; | 17 | long f_namelen; |
| 18 | long f_frsize; | 18 | long f_frsize; |
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h index c4e3ea7cf154..b5612c958cce 100644 --- a/include/linux/sunrpc/cache.h +++ b/include/linux/sunrpc/cache.h | |||
| @@ -50,7 +50,7 @@ struct cache_head { | |||
| 50 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall | 50 | time_t last_refresh; /* If CACHE_PENDING, this is when upcall |
| 51 | * was sent, else this is when update was received | 51 | * was sent, else this is when update was received |
| 52 | */ | 52 | */ |
| 53 | atomic_t refcnt; | 53 | struct kref ref; |
| 54 | unsigned long flags; | 54 | unsigned long flags; |
| 55 | }; | 55 | }; |
| 56 | #define CACHE_VALID 0 /* Entry contains valid data */ | 56 | #define CACHE_VALID 0 /* Entry contains valid data */ |
| @@ -68,8 +68,7 @@ struct cache_detail { | |||
| 68 | atomic_t inuse; /* active user-space update or lookup */ | 68 | atomic_t inuse; /* active user-space update or lookup */ |
| 69 | 69 | ||
| 70 | char *name; | 70 | char *name; |
| 71 | void (*cache_put)(struct cache_head *, | 71 | void (*cache_put)(struct kref *); |
| 72 | struct cache_detail*); | ||
| 73 | 72 | ||
| 74 | void (*cache_request)(struct cache_detail *cd, | 73 | void (*cache_request)(struct cache_detail *cd, |
| 75 | struct cache_head *h, | 74 | struct cache_head *h, |
| @@ -81,6 +80,11 @@ struct cache_detail { | |||
| 81 | struct cache_detail *cd, | 80 | struct cache_detail *cd, |
| 82 | struct cache_head *h); | 81 | struct cache_head *h); |
| 83 | 82 | ||
| 83 | struct cache_head * (*alloc)(void); | ||
| 84 | int (*match)(struct cache_head *orig, struct cache_head *new); | ||
| 85 | void (*init)(struct cache_head *orig, struct cache_head *new); | ||
| 86 | void (*update)(struct cache_head *orig, struct cache_head *new); | ||
| 87 | |||
| 84 | /* fields below this comment are for internal use | 88 | /* fields below this comment are for internal use |
| 85 | * and should not be touched by cache owners | 89 | * and should not be touched by cache owners |
| 86 | */ | 90 | */ |
| @@ -123,126 +127,14 @@ struct cache_deferred_req { | |||
| 123 | int too_many); | 127 | int too_many); |
| 124 | }; | 128 | }; |
| 125 | 129 | ||
| 126 | /* | ||
| 127 | * just like a template in C++, this macro does cache lookup | ||
| 128 | * for us. | ||
| 129 | * The function is passed some sort of HANDLE from which a cache_detail | ||
| 130 | * structure can be determined (via SETUP, DETAIL), a template | ||
| 131 | * cache entry (type RTN*), and a "set" flag. Using the HASHFN and the | ||
| 132 | * TEST, the function will try to find a matching cache entry in the cache. | ||
| 133 | * If "set" == 0 : | ||
| 134 | * If an entry is found, it is returned | ||
| 135 | * If no entry is found, a new non-VALID entry is created. | ||
| 136 | * If "set" == 1 and INPLACE == 0 : | ||
| 137 | * If no entry is found a new one is inserted with data from "template" | ||
| 138 | * If a non-CACHE_VALID entry is found, it is updated from template using UPDATE | ||
| 139 | * If a CACHE_VALID entry is found, a new entry is swapped in with data | ||
| 140 | * from "template" | ||
| 141 | * If set == 1, and INPLACE == 1 : | ||
| 142 | * As above, except that if a CACHE_VALID entry is found, we UPDATE in place | ||
| 143 | * instead of swapping in a new entry. | ||
| 144 | * | ||
| 145 | * If the passed handle has the CACHE_NEGATIVE flag set, then UPDATE is not | ||
| 146 | * run but instead CACHE_NEGATIVE is set in any new item. | ||
| 147 | 130 | ||
| 148 | * In any case, the new entry is returned with a reference count. | 131 | extern struct cache_head * |
| 149 | * | 132 | sunrpc_cache_lookup(struct cache_detail *detail, |
| 150 | * | 133 | struct cache_head *key, int hash); |
| 151 | * RTN is a struct type for a cache entry | 134 | extern struct cache_head * |
| 152 | * MEMBER is the member of the cache which is cache_head, which must be first | 135 | sunrpc_cache_update(struct cache_detail *detail, |
| 153 | * FNAME is the name for the function | 136 | struct cache_head *new, struct cache_head *old, int hash); |
| 154 | * ARGS are arguments to function and must contain RTN *item, int set. May | ||
| 155 | * also contain something to be used by SETUP or DETAIL to find cache_detail. | ||
| 156 | * SETUP locates the cache detail and makes it available as... | ||
| 157 | * DETAIL identifies the cache detail, possibly set up by SETUP | ||
| 158 | * HASHFN returns a hash value of the cache entry "item" | ||
| 159 | * TEST tests if "tmp" matches "item" | ||
| 160 | * INIT copies key information from "item" to "new" | ||
| 161 | * UPDATE copies content information from "item" to "tmp" | ||
| 162 | * INPLACE is true if updates can happen inplace rather than allocating a new structure | ||
| 163 | * | ||
| 164 | * WARNING: any substantial changes to this must be reflected in | ||
| 165 | * net/sunrpc/svcauth.c(auth_domain_lookup) | ||
| 166 | * which is a similar routine that is open-coded. | ||
| 167 | */ | ||
| 168 | #define DefineCacheLookup(RTN,MEMBER,FNAME,ARGS,SETUP,DETAIL,HASHFN,TEST,INIT,UPDATE,INPLACE) \ | ||
| 169 | RTN *FNAME ARGS \ | ||
| 170 | { \ | ||
| 171 | RTN *tmp, *new=NULL; \ | ||
| 172 | struct cache_head **hp, **head; \ | ||
| 173 | SETUP; \ | ||
| 174 | head = &(DETAIL)->hash_table[HASHFN]; \ | ||
| 175 | retry: \ | ||
| 176 | if (set||new) write_lock(&(DETAIL)->hash_lock); \ | ||
| 177 | else read_lock(&(DETAIL)->hash_lock); \ | ||
| 178 | for(hp=head; *hp != NULL; hp = &tmp->MEMBER.next) { \ | ||
| 179 | tmp = container_of(*hp, RTN, MEMBER); \ | ||
| 180 | if (TEST) { /* found a match */ \ | ||
| 181 | \ | ||
| 182 | if (set && !INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags) && !new) \ | ||
| 183 | break; \ | ||
| 184 | \ | ||
| 185 | if (new) \ | ||
| 186 | {INIT;} \ | ||
| 187 | if (set) { \ | ||
| 188 | if (!INPLACE && test_bit(CACHE_VALID, &tmp->MEMBER.flags))\ | ||
| 189 | { /* need to swap in new */ \ | ||
| 190 | RTN *t2; \ | ||
| 191 | \ | ||
| 192 | new->MEMBER.next = tmp->MEMBER.next; \ | ||
| 193 | *hp = &new->MEMBER; \ | ||
| 194 | tmp->MEMBER.next = NULL; \ | ||
| 195 | t2 = tmp; tmp = new; new = t2; \ | ||
| 196 | } \ | ||
| 197 | if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ | ||
| 198 | set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
| 199 | else { \ | ||
| 200 | UPDATE; \ | ||
| 201 | clear_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
| 202 | } \ | ||
| 203 | } \ | ||
| 204 | cache_get(&tmp->MEMBER); \ | ||
| 205 | if (set||new) write_unlock(&(DETAIL)->hash_lock); \ | ||
| 206 | else read_unlock(&(DETAIL)->hash_lock); \ | ||
| 207 | if (set) \ | ||
| 208 | cache_fresh(DETAIL, &tmp->MEMBER, item->MEMBER.expiry_time); \ | ||
| 209 | if (set && !INPLACE && new) cache_fresh(DETAIL, &new->MEMBER, 0); \ | ||
| 210 | if (new) (DETAIL)->cache_put(&new->MEMBER, DETAIL); \ | ||
| 211 | return tmp; \ | ||
| 212 | } \ | ||
| 213 | } \ | ||
| 214 | /* Didn't find anything */ \ | ||
| 215 | if (new) { \ | ||
| 216 | INIT; \ | ||
| 217 | new->MEMBER.next = *head; \ | ||
| 218 | *head = &new->MEMBER; \ | ||
| 219 | (DETAIL)->entries ++; \ | ||
| 220 | cache_get(&new->MEMBER); \ | ||
| 221 | if (set) { \ | ||
| 222 | tmp = new; \ | ||
| 223 | if (test_bit(CACHE_NEGATIVE, &item->MEMBER.flags)) \ | ||
| 224 | set_bit(CACHE_NEGATIVE, &tmp->MEMBER.flags); \ | ||
| 225 | else {UPDATE;} \ | ||
| 226 | } \ | ||
| 227 | } \ | ||
| 228 | if (set||new) write_unlock(&(DETAIL)->hash_lock); \ | ||
| 229 | else read_unlock(&(DETAIL)->hash_lock); \ | ||
| 230 | if (new && set) \ | ||
| 231 | cache_fresh(DETAIL, &new->MEMBER, item->MEMBER.expiry_time); \ | ||
| 232 | if (new) \ | ||
| 233 | return new; \ | ||
| 234 | new = kmalloc(sizeof(*new), GFP_KERNEL); \ | ||
| 235 | if (new) { \ | ||
| 236 | cache_init(&new->MEMBER); \ | ||
| 237 | goto retry; \ | ||
| 238 | } \ | ||
| 239 | return NULL; \ | ||
| 240 | } | ||
| 241 | 137 | ||
| 242 | #define DefineSimpleCacheLookup(STRUCT,INPLACE) \ | ||
| 243 | DefineCacheLookup(struct STRUCT, h, STRUCT##_lookup, (struct STRUCT *item, int set), /*no setup */, \ | ||
| 244 | & STRUCT##_cache, STRUCT##_hash(item), STRUCT##_match(item, tmp),\ | ||
| 245 | STRUCT##_init(new, item), STRUCT##_update(tmp, item),INPLACE) | ||
| 246 | 138 | ||
| 247 | #define cache_for_each(pos, detail, index, member) \ | 139 | #define cache_for_each(pos, detail, index, member) \ |
| 248 | for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ | 140 | for (({read_lock(&(detail)->hash_lock); index = (detail)->hash_size;}) ; \ |
| @@ -258,22 +150,19 @@ extern void cache_clean_deferred(void *owner); | |||
| 258 | 150 | ||
| 259 | static inline struct cache_head *cache_get(struct cache_head *h) | 151 | static inline struct cache_head *cache_get(struct cache_head *h) |
| 260 | { | 152 | { |
| 261 | atomic_inc(&h->refcnt); | 153 | kref_get(&h->ref); |
| 262 | return h; | 154 | return h; |
| 263 | } | 155 | } |
| 264 | 156 | ||
| 265 | 157 | ||
| 266 | static inline int cache_put(struct cache_head *h, struct cache_detail *cd) | 158 | static inline void cache_put(struct cache_head *h, struct cache_detail *cd) |
| 267 | { | 159 | { |
| 268 | if (atomic_read(&h->refcnt) <= 2 && | 160 | if (atomic_read(&h->ref.refcount) <= 2 && |
| 269 | h->expiry_time < cd->nextcheck) | 161 | h->expiry_time < cd->nextcheck) |
| 270 | cd->nextcheck = h->expiry_time; | 162 | cd->nextcheck = h->expiry_time; |
| 271 | return atomic_dec_and_test(&h->refcnt); | 163 | kref_put(&h->ref, cd->cache_put); |
| 272 | } | 164 | } |
| 273 | 165 | ||
| 274 | extern void cache_init(struct cache_head *h); | ||
| 275 | extern void cache_fresh(struct cache_detail *detail, | ||
| 276 | struct cache_head *head, time_t expiry); | ||
| 277 | extern int cache_check(struct cache_detail *detail, | 166 | extern int cache_check(struct cache_detail *detail, |
| 278 | struct cache_head *h, struct cache_req *rqstp); | 167 | struct cache_head *h, struct cache_req *rqstp); |
| 279 | extern void cache_flush(void); | 168 | extern void cache_flush(void); |
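The DefineCacheLookup/DefineSimpleCacheLookup macro template removed above is replaced by per-cache alloc/match/init/update methods in cache_detail plus the exported sunrpc_cache_lookup()/sunrpc_cache_update() helpers. A hedged sketch of the methods a converted cache would now supply; the 'foo' entry type is hypothetical, and the argument roles follow the prototypes added above rather than any specific in-tree user:

/* Illustrative only: a minimal entry type wired to the new cache_detail ops. */
#include <linux/slab.h>
#include <linux/sunrpc/cache.h>

struct foo_entry {
        struct cache_head h;            /* must remain the first member */
        int key;
        int value;
};

static struct cache_head *foo_alloc(void)
{
        struct foo_entry *e = kmalloc(sizeof(*e), GFP_KERNEL);
        return e ? &e->h : NULL;        /* framework initialises the cache_head */
}

static int foo_match(struct cache_head *a, struct cache_head *b)
{
        return container_of(a, struct foo_entry, h)->key ==
               container_of(b, struct foo_entry, h)->key;
}

static void foo_init(struct cache_head *new, struct cache_head *key)
{
        container_of(new, struct foo_entry, h)->key =
                container_of(key, struct foo_entry, h)->key;
}

static void foo_update(struct cache_head *dst, struct cache_head *src)
{
        container_of(dst, struct foo_entry, h)->value =
                container_of(src, struct foo_entry, h)->value;
}

These would be hooked into a cache_detail as .alloc, .match, .init and .update, with lookups going through sunrpc_cache_lookup(&foo_cache, &key.h, hash) and updates through sunrpc_cache_update(); the exact locking and ownership rules are defined by the sunrpc cache code itself, not by this sketch.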
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index c119ce7cbd22..2fe2087edd66 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h | |||
| @@ -45,9 +45,10 @@ struct svc_rqst; /* forward decl */ | |||
| 45 | * of ip addresses to the given client. | 45 | * of ip addresses to the given client. |
| 46 | */ | 46 | */ |
| 47 | struct auth_domain { | 47 | struct auth_domain { |
| 48 | struct cache_head h; | 48 | struct kref ref; |
| 49 | struct hlist_node hash; | ||
| 49 | char *name; | 50 | char *name; |
| 50 | int flavour; | 51 | struct auth_ops *flavour; |
| 51 | }; | 52 | }; |
| 52 | 53 | ||
| 53 | /* | 54 | /* |
| @@ -86,6 +87,9 @@ struct auth_domain { | |||
| 86 | * | 87 | * |
| 87 | * domain_release() | 88 | * domain_release() |
| 88 | * This call releases a domain. | 89 | * This call releases a domain. |
| 90 | * set_client() | ||
| 91 | * Given a pending request (struct svc_rqst), finds and assigns | ||
| 92 | * an appropriate 'auth_domain' as the client. | ||
| 89 | */ | 93 | */ |
| 90 | struct auth_ops { | 94 | struct auth_ops { |
| 91 | char * name; | 95 | char * name; |
| @@ -117,7 +121,7 @@ extern void svc_auth_unregister(rpc_authflavor_t flavor); | |||
| 117 | extern struct auth_domain *unix_domain_find(char *name); | 121 | extern struct auth_domain *unix_domain_find(char *name); |
| 118 | extern void auth_domain_put(struct auth_domain *item); | 122 | extern void auth_domain_put(struct auth_domain *item); |
| 119 | extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom); | 123 | extern int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom); |
| 120 | extern struct auth_domain *auth_domain_lookup(struct auth_domain *item, int set); | 124 | extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); |
| 121 | extern struct auth_domain *auth_domain_find(char *name); | 125 | extern struct auth_domain *auth_domain_find(char *name); |
| 122 | extern struct auth_domain *auth_unix_lookup(struct in_addr addr); | 126 | extern struct auth_domain *auth_unix_lookup(struct in_addr addr); |
| 123 | extern int auth_unix_forget_old(struct auth_domain *dom); | 127 | extern int auth_unix_forget_old(struct auth_domain *dom); |
| @@ -160,8 +164,6 @@ static inline unsigned long hash_mem(char *buf, int length, int bits) | |||
| 160 | return hash >> (BITS_PER_LONG - bits); | 164 | return hash >> (BITS_PER_LONG - bits); |
| 161 | } | 165 | } |
| 162 | 166 | ||
| 163 | extern struct cache_detail auth_domain_cache, ip_map_cache; | ||
| 164 | |||
| 165 | #endif /* __KERNEL__ */ | 167 | #endif /* __KERNEL__ */ |
| 166 | 168 | ||
| 167 | #endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ | 169 | #endif /* _LINUX_SUNRPC_SVCAUTH_H_ */ |
diff --git a/include/linux/threads.h b/include/linux/threads.h index b59738ac6197..e646bcdf2614 100644 --- a/include/linux/threads.h +++ b/include/linux/threads.h | |||
| @@ -28,7 +28,8 @@ | |||
| 28 | #define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) | 28 | #define PID_MAX_DEFAULT (CONFIG_BASE_SMALL ? 0x1000 : 0x8000) |
| 29 | 29 | ||
| 30 | /* | 30 | /* |
| 31 | * A maximum of 4 million PIDs should be enough for a while: | 31 | * A maximum of 4 million PIDs should be enough for a while. |
| 32 | * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.] | ||
| 32 | */ | 33 | */ |
| 33 | #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ | 34 | #define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \ |
| 34 | (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) | 35 | (sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT)) |
diff --git a/include/linux/time.h b/include/linux/time.h index bf0e785e2e03..0cd696cee998 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
| @@ -73,12 +73,6 @@ extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); | |||
| 73 | #define timespec_valid(ts) \ | 73 | #define timespec_valid(ts) \ |
| 74 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) | 74 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) |
| 75 | 75 | ||
| 76 | /* | ||
| 77 | * 64-bit nanosec type. Large enough to span 292+ years in nanosecond | ||
| 78 | * resolution. Ought to be enough for a while. | ||
| 79 | */ | ||
| 80 | typedef s64 nsec_t; | ||
| 81 | |||
| 82 | extern struct timespec xtime; | 76 | extern struct timespec xtime; |
| 83 | extern struct timespec wall_to_monotonic; | 77 | extern struct timespec wall_to_monotonic; |
| 84 | extern seqlock_t xtime_lock; | 78 | extern seqlock_t xtime_lock; |
| @@ -114,9 +108,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); | |||
| 114 | * Returns the scalar nanosecond representation of the timespec | 108 | * Returns the scalar nanosecond representation of the timespec |
| 115 | * parameter. | 109 | * parameter. |
| 116 | */ | 110 | */ |
| 117 | static inline nsec_t timespec_to_ns(const struct timespec *ts) | 111 | static inline s64 timespec_to_ns(const struct timespec *ts) |
| 118 | { | 112 | { |
| 119 | return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; | 113 | return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
| 120 | } | 114 | } |
| 121 | 115 | ||
| 122 | /** | 116 | /** |
| @@ -126,9 +120,9 @@ static inline nsec_t timespec_to_ns(const struct timespec *ts) | |||
| 126 | * Returns the scalar nanosecond representation of the timeval | 120 | * Returns the scalar nanosecond representation of the timeval |
| 127 | * parameter. | 121 | * parameter. |
| 128 | */ | 122 | */ |
| 129 | static inline nsec_t timeval_to_ns(const struct timeval *tv) | 123 | static inline s64 timeval_to_ns(const struct timeval *tv) |
| 130 | { | 124 | { |
| 131 | return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) + | 125 | return ((s64) tv->tv_sec * NSEC_PER_SEC) + |
| 132 | tv->tv_usec * NSEC_PER_USEC; | 126 | tv->tv_usec * NSEC_PER_USEC; |
| 133 | } | 127 | } |
| 134 | 128 | ||
| @@ -138,7 +132,7 @@ static inline nsec_t timeval_to_ns(const struct timeval *tv) | |||
| 138 | * | 132 | * |
| 139 | * Returns the timespec representation of the nsec parameter. | 133 | * Returns the timespec representation of the nsec parameter. |
| 140 | */ | 134 | */ |
| 141 | extern struct timespec ns_to_timespec(const nsec_t nsec); | 135 | extern struct timespec ns_to_timespec(const s64 nsec); |
| 142 | 136 | ||
| 143 | /** | 137 | /** |
| 144 | * ns_to_timeval - Convert nanoseconds to timeval | 138 | * ns_to_timeval - Convert nanoseconds to timeval |
| @@ -146,7 +140,7 @@ extern struct timespec ns_to_timespec(const nsec_t nsec); | |||
| 146 | * | 140 | * |
| 147 | * Returns the timeval representation of the nsec parameter. | 141 | * Returns the timeval representation of the nsec parameter. |
| 148 | */ | 142 | */ |
| 149 | extern struct timeval ns_to_timeval(const nsec_t nsec); | 143 | extern struct timeval ns_to_timeval(const s64 nsec); |
| 150 | 144 | ||
| 151 | #endif /* __KERNEL__ */ | 145 | #endif /* __KERNEL__ */ |
| 152 | 146 | ||
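With the nsec_t typedef gone, the conversion helpers above simply traffic in plain s64 nanoseconds. A trivial usage sketch (the function and values are made up for illustration):

/* Illustrative only: round-tripping a timespec through s64 nanoseconds. */
static s64 example_ns_round_trip(void)
{
        struct timespec ts = { .tv_sec = 1, .tv_nsec = 500 };
        s64 ns = timespec_to_ns(&ts);           /* 1 * NSEC_PER_SEC + 500 = 1000000500 */
        struct timespec back = ns_to_timespec(ns);

        return ns - timespec_to_ns(&back);      /* 0: the conversion is lossless here */
}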
diff --git a/include/linux/timer.h b/include/linux/timer.h index ee5a09e806e8..b5caabca553c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
| @@ -96,6 +96,7 @@ static inline void add_timer(struct timer_list *timer) | |||
| 96 | 96 | ||
| 97 | extern void init_timers(void); | 97 | extern void init_timers(void); |
| 98 | extern void run_local_timers(void); | 98 | extern void run_local_timers(void); |
| 99 | extern int it_real_fn(void *); | 99 | struct hrtimer; |
| 100 | extern int it_real_fn(struct hrtimer *); | ||
| 100 | 101 | ||
| 101 | #endif | 102 | #endif |
diff --git a/include/linux/timex.h b/include/linux/timex.h index 82dc9ae79d37..03914b7e41b1 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
| @@ -307,6 +307,8 @@ time_interpolator_reset(void) | |||
| 307 | /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ | 307 | /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ |
| 308 | extern u64 current_tick_length(void); | 308 | extern u64 current_tick_length(void); |
| 309 | 309 | ||
| 310 | extern int do_adjtimex(struct timex *); | ||
| 311 | |||
| 310 | #endif /* KERNEL */ | 312 | #endif /* KERNEL */ |
| 311 | 313 | ||
| 312 | #endif /* LINUX_TIMEX_H */ | 314 | #endif /* LINUX_TIMEX_H */ |
diff --git a/include/linux/topology.h b/include/linux/topology.h index e8eb0040ce3a..a305ae2e44b6 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
| @@ -164,6 +164,15 @@ | |||
| 164 | .nr_balance_failed = 0, \ | 164 | .nr_balance_failed = 0, \ |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | #ifdef CONFIG_SCHED_MC | ||
| 168 | #ifndef SD_MC_INIT | ||
| 169 | /* for now it's the same as SD_CPU_INIT. | ||
| 170 | * TBD: Tune Domain parameters! | ||
| 171 | */ | ||
| 172 | #define SD_MC_INIT SD_CPU_INIT | ||
| 173 | #endif | ||
| 174 | #endif | ||
| 175 | |||
| 167 | #ifdef CONFIG_NUMA | 176 | #ifdef CONFIG_NUMA |
| 168 | #ifndef SD_NODE_INIT | 177 | #ifndef SD_NODE_INIT |
| 169 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! | 178 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! |
diff --git a/include/linux/types.h b/include/linux/types.h index 54ae2d59e71b..1046c7ad86d9 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -137,6 +137,10 @@ typedef __s64 int64_t; | |||
| 137 | typedef unsigned long sector_t; | 137 | typedef unsigned long sector_t; |
| 138 | #endif | 138 | #endif |
| 139 | 139 | ||
| 140 | #ifndef HAVE_BLKCNT_T | ||
| 141 | typedef unsigned long blkcnt_t; | ||
| 142 | #endif | ||
| 143 | |||
| 140 | /* | 144 | /* |
| 141 | * The type of an index into the pagecache. Use a #define so asm/types.h | 145 | * The type of an index into the pagecache. Use a #define so asm/types.h |
| 142 | * can override it. | 146 | * can override it. |
diff --git a/include/linux/x1205.h b/include/linux/x1205.h deleted file mode 100644 index 64fd3af894a5..000000000000 --- a/include/linux/x1205.h +++ /dev/null | |||
| @@ -1,31 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * x1205.h - defines for drivers/i2c/chips/x1205.c | ||
| 3 | * Copyright 2004 Karen Spearel | ||
| 4 | * Copyright 2005 Alessandro Zummo | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef __LINUX_X1205_H__ | ||
| 13 | #define __LINUX_X1205_H__ | ||
| 14 | |||
| 15 | /* commands */ | ||
| 16 | |||
| 17 | #define X1205_CMD_GETDATETIME 0 | ||
| 18 | #define X1205_CMD_SETTIME 1 | ||
| 19 | #define X1205_CMD_SETDATETIME 2 | ||
| 20 | #define X1205_CMD_GETALARM 3 | ||
| 21 | #define X1205_CMD_SETALARM 4 | ||
| 22 | #define X1205_CMD_GETDTRIM 5 | ||
| 23 | #define X1205_CMD_SETDTRIM 6 | ||
| 24 | #define X1205_CMD_GETATRIM 7 | ||
| 25 | #define X1205_CMD_SETATRIM 8 | ||
| 26 | |||
| 27 | extern int x1205_do_command(unsigned int cmd, void *arg); | ||
| 28 | extern int x1205_direct_attach(int adapter_id, | ||
| 29 | struct i2c_client_address_data *address_data); | ||
| 30 | |||
| 31 | #endif /* __LINUX_X1205_H__ */ | ||
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index b6f0905a4ee2..916013ca4a5c 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
| @@ -300,29 +300,30 @@ DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache); | |||
| 300 | 300 | ||
| 301 | #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) | 301 | #define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x) |
| 302 | 302 | ||
| 303 | extern struct notifier_block *nf_conntrack_chain; | 303 | extern struct atomic_notifier_head nf_conntrack_chain; |
| 304 | extern struct notifier_block *nf_conntrack_expect_chain; | 304 | extern struct atomic_notifier_head nf_conntrack_expect_chain; |
| 305 | 305 | ||
| 306 | static inline int nf_conntrack_register_notifier(struct notifier_block *nb) | 306 | static inline int nf_conntrack_register_notifier(struct notifier_block *nb) |
| 307 | { | 307 | { |
| 308 | return notifier_chain_register(&nf_conntrack_chain, nb); | 308 | return atomic_notifier_chain_register(&nf_conntrack_chain, nb); |
| 309 | } | 309 | } |
| 310 | 310 | ||
| 311 | static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) | 311 | static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb) |
| 312 | { | 312 | { |
| 313 | return notifier_chain_unregister(&nf_conntrack_chain, nb); | 313 | return atomic_notifier_chain_unregister(&nf_conntrack_chain, nb); |
| 314 | } | 314 | } |
| 315 | 315 | ||
| 316 | static inline int | 316 | static inline int |
| 317 | nf_conntrack_expect_register_notifier(struct notifier_block *nb) | 317 | nf_conntrack_expect_register_notifier(struct notifier_block *nb) |
| 318 | { | 318 | { |
| 319 | return notifier_chain_register(&nf_conntrack_expect_chain, nb); | 319 | return atomic_notifier_chain_register(&nf_conntrack_expect_chain, nb); |
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | static inline int | 322 | static inline int |
| 323 | nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) | 323 | nf_conntrack_expect_unregister_notifier(struct notifier_block *nb) |
| 324 | { | 324 | { |
| 325 | return notifier_chain_unregister(&nf_conntrack_expect_chain, nb); | 325 | return atomic_notifier_chain_unregister(&nf_conntrack_expect_chain, |
| 326 | nb); | ||
| 326 | } | 327 | } |
| 327 | 328 | ||
| 328 | extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); | 329 | extern void nf_ct_deliver_cached_events(const struct nf_conn *ct); |
| @@ -347,14 +348,14 @@ static inline void nf_conntrack_event(enum ip_conntrack_events event, | |||
| 347 | struct nf_conn *ct) | 348 | struct nf_conn *ct) |
| 348 | { | 349 | { |
| 349 | if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) | 350 | if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct)) |
| 350 | notifier_call_chain(&nf_conntrack_chain, event, ct); | 351 | atomic_notifier_call_chain(&nf_conntrack_chain, event, ct); |
| 351 | } | 352 | } |
| 352 | 353 | ||
| 353 | static inline void | 354 | static inline void |
| 354 | nf_conntrack_expect_event(enum ip_conntrack_expect_events event, | 355 | nf_conntrack_expect_event(enum ip_conntrack_expect_events event, |
| 355 | struct nf_conntrack_expect *exp) | 356 | struct nf_conntrack_expect *exp) |
| 356 | { | 357 | { |
| 357 | notifier_call_chain(&nf_conntrack_expect_chain, event, exp); | 358 | atomic_notifier_call_chain(&nf_conntrack_expect_chain, event, exp); |
| 358 | } | 359 | } |
| 359 | #else /* CONFIG_NF_CONNTRACK_EVENTS */ | 360 | #else /* CONFIG_NF_CONNTRACK_EVENTS */ |
| 360 | static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, | 361 | static inline void nf_conntrack_event_cache(enum ip_conntrack_events event, |
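The wrapper functions above keep their signatures; only the chain behind them becomes an atomic notifier chain. A hedged sketch of a consumer, which is unchanged by this conversion (the callback body and the 'foo' names are hypothetical):

/* Illustrative only: subscribing to conntrack events via the wrapper API. */
static int foo_conntrack_event(struct notifier_block *nb,
                               unsigned long events, void *ptr)
{
        struct nf_conn *ct = ptr;

        (void)ct;                       /* a real handler would inspect the conntrack */
        return NOTIFY_DONE;
}

static struct notifier_block foo_conntrack_notifier = {
        .notifier_call = foo_conntrack_event,
};

/* registration, e.g. from module init code:
 *      nf_conntrack_register_notifier(&foo_conntrack_notifier);
 */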
diff --git a/include/net/request_sock.h b/include/net/request_sock.h index 11641c9384f7..c5d7f920c352 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h | |||
| @@ -145,7 +145,7 @@ static inline struct request_sock * | |||
| 145 | { | 145 | { |
| 146 | struct request_sock *req = queue->rskq_accept_head; | 146 | struct request_sock *req = queue->rskq_accept_head; |
| 147 | 147 | ||
| 148 | queue->rskq_accept_head = queue->rskq_accept_head = NULL; | 148 | queue->rskq_accept_head = NULL; |
| 149 | return req; | 149 | return req; |
| 150 | } | 150 | } |
| 151 | 151 | ||
