path: root/include/linux
author	Jeff Garzik <jeff@garzik.org>	2006-03-22 19:13:54 -0500
committer	Jeff Garzik <jeff@garzik.org>	2006-03-22 19:13:54 -0500
commit	f01c18456993bab43067b678f56c87ca954aa43b (patch)
tree	3e0cd0cdf1a57618202b46a7126125902e3ab832 /include/linux
parent	949ec2c8e6b7b89179b85baf6309c009e1a1b951 (diff)
parent	1c2e02750b992703a8a18634e08b04353face243 (diff)
Merge branch 'master'
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/device.h        3
-rw-r--r--  include/linux/dma-mapping.h   1
-rw-r--r--  include/linux/hugetlb.h      45
-rw-r--r--  include/linux/libata.h        1
-rw-r--r--  include/linux/migrate.h      36
-rw-r--r--  include/linux/mm.h           48
-rw-r--r--  include/linux/mm_inline.h     2
-rw-r--r--  include/linux/msdos_fs.h      2
-rw-r--r--  include/linux/net.h           6
-rw-r--r--  include/linux/page-flags.h   24
-rw-r--r--  include/linux/pci_ids.h       2
-rw-r--r--  include/linux/rtc.h           4
-rw-r--r--  include/linux/slab.h          3
-rw-r--r--  include/linux/smp.h          23
-rw-r--r--  include/linux/swap.h         38
-rw-r--r--  include/linux/workqueue.h     6
-rw-r--r--  include/linux/x25.h          26
17 files changed, 177 insertions, 93 deletions
diff --git a/include/linux/device.h b/include/linux/device.h
index 5b595fdfb672..f6e72a65a3f2 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -378,6 +378,7 @@ extern void device_bind_driver(struct device * dev);
 extern void device_release_driver(struct device * dev);
 extern int device_attach(struct device * dev);
 extern void driver_attach(struct device_driver * drv);
+extern void device_reprobe(struct device *dev);
 
 
 /*
@@ -399,7 +400,7 @@ extern struct device * get_device(struct device * dev);
 extern void put_device(struct device * dev);
 
 
-/* drivers/base/power.c */
+/* drivers/base/power/shutdown.c */
 extern void device_shutdown(void);
 
 
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2d80cc761a15..a8731062a74c 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -20,6 +20,7 @@ enum dma_data_direction {
 #define DMA_31BIT_MASK	0x000000007fffffffULL
 #define DMA_30BIT_MASK	0x000000003fffffffULL
 #define DMA_29BIT_MASK	0x000000001fffffffULL
+#define DMA_28BIT_MASK	0x000000000fffffffULL
 
 #include <asm/dma-mapping.h>
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 68d82ad6b17c..d6f1019625af 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -20,10 +20,7 @@ void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long)
 int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
-int is_hugepage_mem_enough(size_t);
 unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
-void free_huge_page(struct page *);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
 
@@ -39,18 +36,35 @@ struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			int write);
 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 				pmd_t *pmd, int write);
-int is_aligned_hugepage_range(unsigned long addr, unsigned long len);
 int pmd_huge(pmd_t pmd);
+void hugetlb_change_protection(struct vm_area_struct *vma,
+		unsigned long address, unsigned long end, pgprot_t newprot);
 
 #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
 #define is_hugepage_only_range(mm, addr, len)	0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-	do { } while (0)
+#endif
+
+#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define hugetlb_free_pgd_range	free_pgd_range
+#else
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+			    unsigned long end, unsigned long floor,
+			    unsigned long ceiling);
 #endif
 
 #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define prepare_hugepage_range(addr, len)	\
-	is_aligned_hugepage_range(addr, len)
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
 #else
 int prepare_hugepage_range(unsigned long addr, unsigned long len);
 #endif
@@ -87,20 +101,17 @@ static inline unsigned long hugetlb_total_pages(void)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 #define hugetlb_prefault(mapping, vma)		({ BUG(); 0; })
 #define unmap_hugepage_range(vma, start, end)	BUG()
-#define is_hugepage_mem_enough(size)		0
 #define hugetlb_report_meminfo(buf)		0
 #define hugetlb_report_node_meminfo(n, buf)	0
 #define follow_huge_pmd(mm, addr, pmd, write)	NULL
-#define is_aligned_hugepage_range(addr, len)	0
 #define prepare_hugepage_range(addr, len)	(-EINVAL)
 #define pmd_huge(x)	0
 #define is_hugepage_only_range(mm, addr, len)	0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
-	do { } while (0)
-#define alloc_huge_page(vma, addr)	({ NULL; })
-#define free_huge_page(p)	({ (void)(p); BUG(); })
+#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({BUG(); 0; })
 #define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
+#define hugetlb_change_protection(vma, address, end, newprot)
+
 #ifndef HPAGE_MASK
 #define HPAGE_MASK	PAGE_MASK	/* Keep the compiler happy */
 #define HPAGE_SIZE	PAGE_SIZE
@@ -128,6 +139,8 @@ struct hugetlbfs_sb_info {
 
 struct hugetlbfs_inode_info {
 	struct shared_policy policy;
+	/* Protected by the (global) hugetlb_lock */
+	unsigned long prereserved_hpages;
 	struct inode vfs_inode;
 };
 
@@ -144,6 +157,10 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 extern struct file_operations hugetlbfs_file_operations;
 extern struct vm_operations_struct hugetlb_vm_ops;
 struct file *hugetlb_zero_setup(size_t);
+int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
+			       unsigned long atleast_hpages);
+void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
+				  unsigned long atmost_hpages);
 int hugetlb_get_quota(struct address_space *mapping);
 void hugetlb_put_quota(struct address_space *mapping);
 
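For illustration, a standalone sketch (not from this commit) of the alignment check the new default prepare_hugepage_range() performs; the EX_* constants are made-up stand-ins for a hypothetical 2 MiB HPAGE_SIZE/HPAGE_MASK:

#include <linux/errno.h>

/* Made-up stand-ins for the arch-provided HPAGE_* constants (2 MiB assumed). */
#define EX_HPAGE_SHIFT	21
#define EX_HPAGE_SIZE	(1UL << EX_HPAGE_SHIFT)
#define EX_HPAGE_MASK	(~(EX_HPAGE_SIZE - 1))

static inline int ex_prepare_hugepage_range(unsigned long addr, unsigned long len)
{
	if (len & ~EX_HPAGE_MASK)	/* len is not a whole number of huge pages */
		return -EINVAL;
	if (addr & ~EX_HPAGE_MASK)	/* addr is not huge-page aligned */
		return -EINVAL;
	return 0;			/* aligned region, nothing more to prepare */
}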
diff --git a/include/linux/libata.h b/include/linux/libata.h
index d81cecdda4f3..7a54244d30aa 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -509,7 +509,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
 extern int ata_scsi_detect(struct scsi_host_template *sht);
 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
-extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 extern int ata_scsi_error(struct Scsi_Host *host);
 extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
 extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
new file mode 100644
index 000000000000..7d09962c3c0b
--- /dev/null
+++ b/include/linux/migrate.h
@@ -0,0 +1,36 @@
+#ifndef _LINUX_MIGRATE_H
+#define _LINUX_MIGRATE_H
+
+#include <linux/config.h>
+#include <linux/mm.h>
+
+#ifdef CONFIG_MIGRATION
+extern int isolate_lru_page(struct page *p, struct list_head *pagelist);
+extern int putback_lru_pages(struct list_head *l);
+extern int migrate_page(struct page *, struct page *);
+extern void migrate_page_copy(struct page *, struct page *);
+extern int migrate_page_remove_references(struct page *, struct page *, int);
+extern int migrate_pages(struct list_head *l, struct list_head *t,
+		struct list_head *moved, struct list_head *failed);
+int migrate_pages_to(struct list_head *pagelist,
+		struct vm_area_struct *vma, int dest);
+extern int fail_migrate_page(struct page *, struct page *);
+
+extern int migrate_prep(void);
+
+#else
+
+static inline int isolate_lru_page(struct page *p, struct list_head *list)
+					{ return -ENOSYS; }
+static inline int putback_lru_pages(struct list_head *l) { return 0; }
+static inline int migrate_pages(struct list_head *l, struct list_head *t,
+	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
+
+static inline int migrate_prep(void) { return -ENOSYS; }
+
+/* Possible settings for the migrate_page() method in address_operations */
+#define migrate_page NULL
+#define fail_migrate_page NULL
+
+#endif /* CONFIG_MIGRATION */
+#endif /* _LINUX_MIGRATE_H */
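For illustration only, a rough sketch of how a CONFIG_MIGRATION caller might chain the declarations above; it is inferred from the prototypes, not taken from this commit, and real callers (mempolicy, vmscan) do considerably more bookkeeping:

#include <linux/list.h>
#include <linux/migrate.h>

/* Hypothetical helper: try to move every page on 'pages' to the new pages on 'targets'. */
static void example_migrate(struct list_head *pages, struct list_head *targets)
{
	LIST_HEAD(moved);
	LIST_HEAD(failed);

	migrate_prep();				/* drain per-cpu LRU caches before isolating */
	migrate_pages(pages, targets, &moved, &failed);
	putback_lru_pages(&failed);		/* pages that could not be migrated go back on the LRU */
}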
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 498ff8778fb6..6aa016f1d3ae 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -286,43 +286,34 @@ struct page {
  *
  * Also, many kernel routines increase the page count before a critical
  * routine so they can be sure the page doesn't go away from under them.
- *
- * Since 2.6.6 (approx), a free page has ->_count = -1. This is so that we
- * can use atomic_add_negative(-1, page->_count) to detect when the page
- * becomes free and so that we can also use atomic_inc_and_test to atomically
- * detect when we just tried to grab a ref on a page which some other CPU has
- * already deemed to be freeable.
- *
- * NO code should make assumptions about this internal detail! Use the provided
- * macros which retain the old rules: page_count(page) == 0 is a free page.
  */
 
 /*
  * Drop a ref, return true if the logical refcount fell to zero (the page has
  * no users)
  */
-#define put_page_testzero(p)				\
-	({						\
-		BUG_ON(atomic_read(&(p)->_count) == -1);\
-		atomic_add_negative(-1, &(p)->_count);	\
-	})
+static inline int put_page_testzero(struct page *page)
+{
+	BUG_ON(atomic_read(&page->_count) == 0);
+	return atomic_dec_and_test(&page->_count);
+}
 
 /*
- * Grab a ref, return true if the page previously had a logical refcount of
- * zero. ie: returns true if we just grabbed an already-deemed-to-be-free page
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
  */
-#define get_page_testone(p)	atomic_inc_and_test(&(p)->_count)
-
-#define set_page_count(p,v)	atomic_set(&(p)->_count, (v) - 1)
-#define __put_page(p)		atomic_dec(&(p)->_count)
+static inline int get_page_unless_zero(struct page *page)
+{
+	return atomic_inc_not_zero(&page->_count);
+}
 
 extern void FASTCALL(__page_cache_release(struct page *));
 
 static inline int page_count(struct page *page)
 {
-	if (PageCompound(page))
+	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
-	return atomic_read(&page->_count) + 1;
+	return atomic_read(&page->_count);
 }
 
 static inline void get_page(struct page *page)
@@ -332,8 +323,19 @@ static inline void get_page(struct page *page)
 	atomic_inc(&page->_count);
 }
 
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+	atomic_set(&page->_count, 1);
+}
+
 void put_page(struct page *page);
 
+void split_page(struct page *page, unsigned int order);
+
 /*
  * Multiple processes may "see" the same page. E.g. for untouched
  * mappings of /dev/null, all processes see the same page full of
@@ -1046,7 +1048,7 @@ int in_gate_area_no_task(unsigned long addr);
 
 int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
 					void __user *, size_t *, loff_t *);
-int shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			unsigned long lru_pages);
 void drop_pagecache(void);
 void drop_slab(void);
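For illustration, a user-space model (not kernel code) of the refcount convention this hunk moves to: ->_count now holds the true number of references, 0 means free, and init_page_count() starts a page at 1, so the old +1/-1 offset tricks disappear:

#include <assert.h>

struct page_model { int count; };	/* toy stand-in for struct page::_count */

static void model_init_page_count(struct page_model *p) { p->count = 1; }
static int  model_page_count(struct page_model *p)      { return p->count; }

static int model_put_page_testzero(struct page_model *p)
{
	assert(p->count != 0);		/* mirrors the BUG_ON() above */
	return --p->count == 0;		/* true when the last reference went away */
}

static int model_get_page_unless_zero(struct page_model *p)
{
	if (p->count == 0)		/* page already free: refuse the speculative reference */
		return 0;
	p->count++;
	return 1;
}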
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8ac854f7f190..3b6723dfaff3 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -32,7 +32,7 @@ del_page_from_lru(struct zone *zone, struct page *page)
 {
 	list_del(&page->lru);
 	if (PageActive(page)) {
-		ClearPageActive(page);
+		__ClearPageActive(page);
 		zone->nr_active--;
 	} else {
 		zone->nr_inactive--;
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index e933e2a355ad..8bcd9450d926 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -199,7 +199,7 @@ struct fat_mount_options {
 		 sys_immutable:1,  /* set = system files are immutable */
 		 dotsOK:1,         /* set = hidden and system files are named '.filename' */
 		 isvfat:1,         /* 0=no vfat long filename support, 1=vfat support */
-		 utf8:1,	   /* Use of UTF8 character set (Default) */
+		 utf8:1,	   /* Use of UTF-8 character set (Default) */
 		 unicode_xlate:1,  /* create escape sequences for unhandled Unicode */
 		 numtail:1,        /* Does first alias have a numeric '~1' type tail? */
 		 atari:1,          /* Use Atari GEMDOS variation of MS-DOS fs */
diff --git a/include/linux/net.h b/include/linux/net.h
index 152fa6551fd8..84a490e5f0a1 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -143,6 +143,8 @@ struct proto_ops {
 				      struct poll_table_struct *wait);
 	int	(*ioctl)	(struct socket *sock, unsigned int cmd,
 				      unsigned long arg);
+	int	(*compat_ioctl)	(struct socket *sock, unsigned int cmd,
+				      unsigned long arg);
 	int	(*listen)	(struct socket *sock, int len);
 	int	(*shutdown)	(struct socket *sock, int flags);
 	int	(*setsockopt)	(struct socket *sock, int level,
@@ -251,6 +253,8 @@ SOCKCALL_UWRAP(name, poll, (struct file *file, struct socket *sock, struct poll_
 			  (file, sock, wait)) \
 SOCKCALL_WRAP(name, ioctl, (struct socket *sock, unsigned int cmd, \
 			  unsigned long arg), (sock, cmd, arg)) \
+SOCKCALL_WRAP(name, compat_ioctl, (struct socket *sock, unsigned int cmd, \
+			  unsigned long arg), (sock, cmd, arg)) \
 SOCKCALL_WRAP(name, listen, (struct socket *sock, int len), (sock, len)) \
 SOCKCALL_WRAP(name, shutdown, (struct socket *sock, int flags), (sock, flags)) \
 SOCKCALL_WRAP(name, setsockopt, (struct socket *sock, int level, int optname, \
@@ -275,6 +279,7 @@ static const struct proto_ops name##_ops = {			\
 	.getname	= __lock_##name##_getname,	\
 	.poll		= __lock_##name##_poll,		\
 	.ioctl		= __lock_##name##_ioctl,	\
+	.compat_ioctl	= __lock_##name##_compat_ioctl,	\
 	.listen		= __lock_##name##_listen,	\
 	.shutdown	= __lock_##name##_shutdown,	\
 	.setsockopt	= __lock_##name##_setsockopt,	\
@@ -283,6 +288,7 @@ static const struct proto_ops name##_ops = {			\
 	.recvmsg	= __lock_##name##_recvmsg,	\
 	.mmap		= __lock_##name##_mmap,		\
 };
+
 #endif
 
 #define MODULE_ALIAS_NETPROTO(proto) \
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index d52999c43336..9ea629c02a4b 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -86,8 +86,9 @@
  * - The __xxx_page_state variants can be used safely when interrupts are
  *   disabled.
  * - The __xxx_page_state variants can be used if the field is only
- *   modified from process context, or only modified from interrupt context.
- *   In this case, the field should be commented here.
+ *   modified from process context and protected from preemption, or only
+ *   modified from interrupt context. In this case, the field should be
+ *   commented here.
  */
 struct page_state {
 	unsigned long nr_dirty;	/* Dirty writeable pages */
@@ -239,22 +240,19 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define __ClearPageDirty(page)	__clear_bit(PG_dirty, &(page)->flags)
 #define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
 
-#define SetPageLRU(page)	set_bit(PG_lru, &(page)->flags)
 #define PageLRU(page)		test_bit(PG_lru, &(page)->flags)
-#define TestSetPageLRU(page)	test_and_set_bit(PG_lru, &(page)->flags)
-#define TestClearPageLRU(page)	test_and_clear_bit(PG_lru, &(page)->flags)
+#define SetPageLRU(page)	set_bit(PG_lru, &(page)->flags)
+#define ClearPageLRU(page)	clear_bit(PG_lru, &(page)->flags)
+#define __ClearPageLRU(page)	__clear_bit(PG_lru, &(page)->flags)
 
 #define PageActive(page)	test_bit(PG_active, &(page)->flags)
 #define SetPageActive(page)	set_bit(PG_active, &(page)->flags)
 #define ClearPageActive(page)	clear_bit(PG_active, &(page)->flags)
-#define TestClearPageActive(page) test_and_clear_bit(PG_active, &(page)->flags)
-#define TestSetPageActive(page) test_and_set_bit(PG_active, &(page)->flags)
+#define __ClearPageActive(page)	__clear_bit(PG_active, &(page)->flags)
 
 #define PageSlab(page)		test_bit(PG_slab, &(page)->flags)
-#define SetPageSlab(page)	set_bit(PG_slab, &(page)->flags)
-#define ClearPageSlab(page)	clear_bit(PG_slab, &(page)->flags)
-#define TestClearPageSlab(page)	test_and_clear_bit(PG_slab, &(page)->flags)
-#define TestSetPageSlab(page)	test_and_set_bit(PG_slab, &(page)->flags)
+#define __SetPageSlab(page)	__set_bit(PG_slab, &(page)->flags)
+#define __ClearPageSlab(page)	__clear_bit(PG_slab, &(page)->flags)
 
 #ifdef CONFIG_HIGHMEM
 #define PageHighMem(page)	is_highmem(page_zone(page))
@@ -329,8 +327,8 @@ extern void __mod_page_state_offset(unsigned long offset, unsigned long delta);
 #define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
 
 #define PageCompound(page)	test_bit(PG_compound, &(page)->flags)
-#define SetPageCompound(page)	set_bit(PG_compound, &(page)->flags)
-#define ClearPageCompound(page)	clear_bit(PG_compound, &(page)->flags)
+#define __SetPageCompound(page)	__set_bit(PG_compound, &(page)->flags)
+#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
 
 #ifdef CONFIG_SWAP
 #define PageSwapCache(page)	test_bit(PG_swapcache, &(page)->flags)
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b9810ddf435a..ec3c32932620 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -852,6 +852,8 @@
 #define PCI_DEVICE_ID_QLOGIC_ISP2432	0x2432
 #define PCI_DEVICE_ID_QLOGIC_ISP2512	0x2512
 #define PCI_DEVICE_ID_QLOGIC_ISP2522	0x2522
+#define PCI_DEVICE_ID_QLOGIC_ISP5422	0x5422
+#define PCI_DEVICE_ID_QLOGIC_ISP5432	0x5432
 
 #define PCI_VENDOR_ID_CYRIX		0x1078
 #define PCI_DEVICE_ID_CYRIX_5510	0x0000
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 0b2ba67ff13c..b739ac1f7ca0 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -11,8 +11,6 @@
 #ifndef _LINUX_RTC_H_
 #define _LINUX_RTC_H_
 
-#include <linux/interrupt.h>
-
 /*
  * The struct used to pass data via the following ioctl. Similar to the
  * struct tm in <time.h>, but it needs to be here so that the kernel
@@ -95,6 +93,8 @@ struct rtc_pll_info {
 
 #ifdef __KERNEL__
 
+#include <linux/interrupt.h>
+
 typedef struct rtc_task {
 	void (*func)(void *private_data);
 	void *private_data;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8cf52939d0ab..2b28c849d75a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -38,7 +38,6 @@ typedef struct kmem_cache kmem_cache_t;
 #define	SLAB_DEBUG_INITIAL	0x00000200UL	/* Call constructor (as verifier) */
 #define	SLAB_RED_ZONE		0x00000400UL	/* Red zone objs in a cache */
 #define	SLAB_POISON		0x00000800UL	/* Poison objects */
-#define	SLAB_NO_REAP		0x00001000UL	/* never reap from the cache */
 #define	SLAB_HWCACHE_ALIGN	0x00002000UL	/* align objs on a h/w cache lines */
 #define SLAB_CACHE_DMA		0x00004000UL	/* use GFP_DMA memory */
 #define SLAB_MUST_HWCACHE_ALIGN	0x00008000UL	/* force alignment */
@@ -118,7 +117,7 @@ extern void *kzalloc(size_t, gfp_t);
  */
 static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
 {
-	if (n != 0 && size > INT_MAX / n)
+	if (n != 0 && size > ULONG_MAX / n)
 		return NULL;
 	return kzalloc(n * size, flags);
 }
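For illustration, a small user-space example (not from this commit) of the unsigned multiplication wraparound that the widened ULONG_MAX / n guard in kcalloc() rejects:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long n = 4;
	unsigned long size = ULONG_MAX / 2 + 2;	/* absurdly large per-element size */

	/* n * size wraps around, so an unchecked allocation would be tiny. */
	printf("n * size wraps to %lu\n", n * size);
	/* The kcalloc() guard catches it before the product is trusted. */
	printf("guard rejects it: %d\n", n != 0 && size > ULONG_MAX / n);
	return 0;
}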
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 44153fdf73fc..d699a16b0cb2 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -52,23 +52,12 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-extern int smp_call_function (void (*func) (void *info), void *info,
-			      int retry, int wait);
+int smp_call_function(void(*func)(void *info), void *info, int retry, int wait);
 
 /*
  * Call a function on all processors
  */
-static inline int on_each_cpu(void (*func) (void *info), void *info,
-			      int retry, int wait)
-{
-	int ret = 0;
-
-	preempt_disable();
-	ret = smp_call_function(func, info, retry, wait);
-	func(info);
-	preempt_enable();
-	return ret;
-}
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait);
 
 #define MSG_ALL_BUT_SELF	0x8000	/* Assume <32768 CPU's */
 #define MSG_ALL			0x8001
@@ -94,7 +83,13 @@ void smp_prepare_boot_cpu(void);
 #define raw_smp_processor_id()			0
 #define hard_smp_processor_id()			0
 #define smp_call_function(func,info,retry,wait)	({ 0; })
-#define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
+#define on_each_cpu(func,info,retry,wait)	\
+	({					\
+		local_irq_disable();		\
+		func(info);			\
+		local_irq_enable();		\
+		0;				\
+	})
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()			1
 #define smp_prepare_boot_cpu()			do {} while (0)
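For illustration, a hypothetical caller (not part of this commit) showing the calling convention shared by the SMP declaration and the UP macro above:

/* Hypothetical per-CPU callback; on UP the macro above runs it with IRQs disabled. */
static void example_flush_local(void *unused)
{
	/* ... per-CPU work ... */
}

static void example_flush_everywhere(void)
{
	/* retry = 0, wait = 1: return only after every CPU has run the callback. */
	on_each_cpu(example_flush_local, NULL, 0, 1);
}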
diff --git a/include/linux/swap.h b/include/linux/swap.h
index d572b19afb7d..12415dd94451 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -172,9 +172,24 @@ extern int rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern int try_to_free_pages(struct zone **, gfp_t);
-extern int shrink_all_memory(int);
+extern unsigned long try_to_free_pages(struct zone **, gfp_t);
+extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
+extern int remove_mapping(struct address_space *mapping, struct page *page);
+
+/* possible outcome of pageout() */
+typedef enum {
+	/* failed to write page out, page is locked */
+	PAGE_KEEP,
+	/* move page to the active list, page is locked */
+	PAGE_ACTIVATE,
+	/* page has been sent to the disk successfully, page is unlocked */
+	PAGE_SUCCESS,
+	/* page is clean and locked */
+	PAGE_CLEAN,
+} pageout_t;
+
+extern pageout_t pageout(struct page *page, struct address_space *mapping);
 
 #ifdef CONFIG_NUMA
 extern int zone_reclaim_mode;
@@ -188,25 +203,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 }
 #endif
 
-#ifdef CONFIG_MIGRATION
-extern int isolate_lru_page(struct page *p);
-extern int putback_lru_pages(struct list_head *l);
-extern int migrate_page(struct page *, struct page *);
-extern void migrate_page_copy(struct page *, struct page *);
-extern int migrate_page_remove_references(struct page *, struct page *, int);
-extern int migrate_pages(struct list_head *l, struct list_head *t,
-		struct list_head *moved, struct list_head *failed);
-extern int fail_migrate_page(struct page *, struct page *);
-#else
-static inline int isolate_lru_page(struct page *p) { return -ENOSYS; }
-static inline int putback_lru_pages(struct list_head *l) { return 0; }
-static inline int migrate_pages(struct list_head *l, struct list_head *t,
-	struct list_head *moved, struct list_head *failed) { return -ENOSYS; }
-/* Possible settings for the migrate_page() method in address_operations */
-#define migrate_page NULL
-#define fail_migrate_page NULL
-#endif
-
 #ifdef CONFIG_MMU
 /* linux/mm/shmem.c */
 extern int shmem_unuse(swp_entry_t entry, struct page *page);
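For illustration, a sketch (not from this commit) of how a reclaim path might dispatch on the pageout_t values declared above; the real vmscan.c logic does more bookkeeping around locking and writeback:

#include <linux/swap.h>

/* Hypothetical helper: returns 1 when the caller may go on and try to free the page. */
static int example_try_pageout(struct page *page, struct address_space *mapping)
{
	switch (pageout(page, mapping)) {
	case PAGE_KEEP:		/* write failed; leave the page alone (it is still locked) */
		return 0;
	case PAGE_ACTIVATE:	/* cannot write it now; caller should re-activate it */
		return 0;
	case PAGE_SUCCESS:	/* writeout was submitted and the page was unlocked */
		return 1;
	case PAGE_CLEAN:	/* nothing to write; still locked, caller may try to free it */
		return 1;
	}
	return 0;
}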
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 86b111300231..957c21c16d62 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -20,6 +20,10 @@ struct work_struct {
 	struct timer_list timer;
 };
 
+struct execute_work {
+	struct work_struct work;
+};
+
 #define __WORK_INITIALIZER(n, f, d) {				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
@@ -74,6 +78,8 @@ extern void init_workqueues(void);
 void cancel_rearming_delayed_work(struct work_struct *work);
 void cancel_rearming_delayed_workqueue(struct workqueue_struct *,
 				       struct work_struct *);
+int execute_in_process_context(void (*fn)(void *), void *,
+			       struct execute_work *);
 
 /*
  * Kill off a pending schedule_delayed_work(). Note that the work callback
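For illustration, a hypothetical driver-side sketch based only on the prototype above: the caller supplies the execute_work storage and a callback that needs process context (the struct presumably exists so the helper can defer the call to a workqueue when the caller is in atomic context):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_dev {
	struct execute_work release_work;	/* storage handed to the helper */
	/* ... */
};

static void example_dev_release(void *data)
{
	struct example_dev *dev = data;
	/* teardown that must not run in atomic context */
	kfree(dev);
}

static void example_dev_put(struct example_dev *dev)
{
	execute_in_process_context(example_dev_release, dev, &dev->release_work);
}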
diff --git a/include/linux/x25.h b/include/linux/x25.h
index 16d44931afa0..d035e4e87d07 100644
--- a/include/linux/x25.h
+++ b/include/linux/x25.h
@@ -11,6 +11,8 @@
 #ifndef	X25_KERNEL_H
 #define	X25_KERNEL_H
 
+#include <linux/types.h>
+
 #define	SIOCX25GSUBSCRIP	(SIOCPROTOPRIVATE + 0)
 #define	SIOCX25SSUBSCRIP	(SIOCPROTOPRIVATE + 1)
 #define	SIOCX25GFACILITIES	(SIOCPROTOPRIVATE + 2)
@@ -21,6 +23,8 @@
 #define	SIOCX25SCUDMATCHLEN	(SIOCPROTOPRIVATE + 7)
 #define	SIOCX25CALLACCPTAPPRV	(SIOCPROTOPRIVATE + 8)
 #define	SIOCX25SENDCALLACCPT	(SIOCPROTOPRIVATE + 9)
+#define	SIOCX25GDTEFACILITIES	(SIOCPROTOPRIVATE + 10)
+#define	SIOCX25SDTEFACILITIES	(SIOCPROTOPRIVATE + 11)
 
 /*
  *	Values for {get,set}sockopt.
@@ -77,6 +81,8 @@ struct x25_subscrip_struct {
 #define X25_MASK_PACKET_SIZE	0x04
 #define X25_MASK_WINDOW_SIZE	0x08
 
+#define X25_MASK_CALLING_AE	0x10
+#define X25_MASK_CALLED_AE	0x20
 
 
 /*
@@ -99,6 +105,26 @@ struct x25_facilities {
 };
 
 /*
+* ITU DTE facilities
+* Only the called and calling address
+* extension are currently implemented.
+* The rest are in place to avoid the struct
+* changing size if someone needs them later
+*/
+
+struct x25_dte_facilities {
+	__u16 delay_cumul;
+	__u16 delay_target;
+	__u16 delay_max;
+	__u8 min_throughput;
+	__u8 expedited;
+	__u8 calling_len;
+	__u8 called_len;
+	__u8 calling_ae[20];
+	__u8 called_ae[20];
+};
+
+/*
  *	Call User Data structure.
  */
 struct x25_calluserdata {
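For illustration, a hypothetical user-space sketch of filling the new structure and handing it to an X.25 socket through the SIOCX25SDTEFACILITIES ioctl added above; the field values and the semi-octet length convention are assumptions, not taken from this commit:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/x25.h>

/* Hypothetical: fd is an already-connected AF_X25 socket. */
static int example_set_dte_facilities(int fd)
{
	struct x25_dte_facilities dte;

	memset(&dte, 0, sizeof(dte));		/* leave the placeholder fields at zero */
	dte.calling_len = 8;			/* assumed length unit: semi-octets */
	memcpy(dte.calling_ae, "\x12\x34\x56\x78", 4);
	return ioctl(fd, SIOCX25SDTEFACILITIES, &dte);
}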