 85 files changed, 682 insertions(+), 503 deletions(-)
diff --git a/arch/frv/kernel/semaphore.c b/arch/frv/kernel/semaphore.c
index 5cba9c1f2b3d..7971d680ae29 100644
--- a/arch/frv/kernel/semaphore.c
+++ b/arch/frv/kernel/semaphore.c
@@ -20,7 +20,7 @@ struct sem_waiter {
 	struct task_struct *task;
 };
 
-#if SEM_DEBUG
+#if SEMAPHORE_DEBUG
 void semtrace(struct semaphore *sem, const char *str)
 {
 	if (sem->debug)
diff --git a/arch/frv/mb93090-mb00/pci-irq.c b/arch/frv/mb93090-mb00/pci-irq.c
index af981bda015c..24622d89b1ca 100644
--- a/arch/frv/mb93090-mb00/pci-irq.c
+++ b/arch/frv/mb93090-mb00/pci-irq.c
@@ -60,7 +60,7 @@ void __init pcibios_fixup_irqs(void)
 	}
 }
 
-void __init pcibios_penalize_isa_irq(int irq, int active)
+void __init pcibios_penalize_isa_irq(int irq)
 {
 }
 
diff --git a/arch/frv/mm/init.c b/arch/frv/mm/init.c
index 79433159b5f0..765088ea8a50 100644
--- a/arch/frv/mm/init.c
+++ b/arch/frv/mm/init.c
@@ -108,7 +108,7 @@ void __init paging_init(void)
 
 	memset((void *) empty_zero_page, 0, PAGE_SIZE);
 
-#if CONFIG_HIGHMEM
+#ifdef CONFIG_HIGHMEM
 	if (num_physpages - num_mappedpages) {
 		pgd_t *pge;
 		pud_t *pue;
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 2c67dfe5a6b3..f76dd03ddd99 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
@@ -85,7 +85,7 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	page->index = (unsigned long) pgd_list;
 	if (pgd_list)
-		pgd_list->private = (unsigned long) &page->index;
+		set_page_private(pgd_list, (unsigned long) &page->index);
 	pgd_list = page;
 	set_page_private(page, (unsigned long)&pgd_list);
 }
@@ -94,10 +94,10 @@ static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *) page->index;
-	pprev = (struct page **)page_private(page);
+	pprev = (struct page **) page_private(page);
 	*pprev = next;
 	if (next)
-		next->private = (unsigned long) pprev;
+		set_page_private(next, (unsigned long) pprev);
 }
 
 void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
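The pgalloc.c hunks above replace direct stores to page->private with the set_page_private() accessor. Routing every access through one helper pair gives a single place to change the field's representation or add debugging later. A minimal sketch of the accessor idiom, using a hypothetical page_like type rather than the kernel's real struct page:

    struct page_like {
    	unsigned long private;	/* opaque word owned by whoever maps the page */
    };

    /* one choke point for writes ... */
    static inline void set_page_private_sketch(struct page_like *page, unsigned long v)
    {
    	page->private = v;
    }

    /* ... and one for reads */
    static inline unsigned long page_private_sketch(const struct page_like *page)
    {
    	return page->private;
    }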
diff --git a/arch/m32r/kernel/io_mappi3.c b/arch/m32r/kernel/io_mappi3.c
index 6716ffea769a..f80321a58764 100644
--- a/arch/m32r/kernel/io_mappi3.c
+++ b/arch/m32r/kernel/io_mappi3.c
@@ -36,12 +36,13 @@ static inline void *_port2addr(unsigned long port)
 	return (void *)(port + NONCACHE_OFFSET);
 }
 
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
+#if defined(CONFIG_IDE)
 static inline void *__port2addr_ata(unsigned long port)
 {
 	static int dummy_reg;
 
 	switch (port) {
+	/* IDE0 CF */
 	case 0x1f0:	 return (void *)0xb4002000;
 	case 0x1f1:	 return (void *)0xb4012800;
 	case 0x1f2:	 return (void *)0xb4012002;
@@ -51,6 +52,17 @@ static inline void *__port2addr_ata(unsigned long port)
 	case 0x1f6:	 return (void *)0xb4012006;
 	case 0x1f7:	 return (void *)0xb4012806;
 	case 0x3f6:	 return (void *)0xb401200e;
+	/* IDE1 IDE */
+	case 0x170:	 return (void *)0xb4810000;	/* Data 16bit */
+	case 0x171:	 return (void *)0xb4810002;	/* Features / Error */
+	case 0x172:	 return (void *)0xb4810004;	/* Sector count */
+	case 0x173:	 return (void *)0xb4810006;	/* Sector number */
+	case 0x174:	 return (void *)0xb4810008;	/* Cylinder low */
+	case 0x175:	 return (void *)0xb481000a;	/* Cylinder high */
+	case 0x176:	 return (void *)0xb481000c;	/* Device head */
+	case 0x177:	 return (void *)0xb481000e;	/* Command */
+	case 0x376:	 return (void *)0xb480800c;	/* Device control / Alt status */
+
 	default: 	 return (void *)&dummy_reg;
 	}
 }
@@ -108,8 +120,9 @@ unsigned char _inb(unsigned long port)
 {
 	if (port >= LAN_IOSTART && port < LAN_IOEND)
 		return _ne_inb(PORT2ADDR_NE(port));
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		  ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		return *(volatile unsigned char *)__port2addr_ata(port);
 	}
 #endif
@@ -127,8 +140,9 @@ unsigned short _inw(unsigned long port)
 {
 	if (port >= LAN_IOSTART && port < LAN_IOEND)
 		return _ne_inw(PORT2ADDR_NE(port));
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		  ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		return *(volatile unsigned short *)__port2addr_ata(port);
 	}
 #endif
@@ -185,8 +199,9 @@ void _outb(unsigned char b, unsigned long port)
 	if (port >= LAN_IOSTART && port < LAN_IOEND)
 		_ne_outb(b, PORT2ADDR_NE(port));
 	else
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		*(volatile unsigned char *)__port2addr_ata(port) = b;
 	} else
 #endif
@@ -203,8 +218,9 @@ void _outw(unsigned short w, unsigned long port)
 	if (port >= LAN_IOSTART && port < LAN_IOEND)
 		_ne_outw(w, PORT2ADDR_NE(port));
 	else
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		*(volatile unsigned short *)__port2addr_ata(port) = w;
 	} else
 #endif
@@ -253,8 +269,9 @@ void _insb(unsigned int port, void * addr, unsigned long count)
 {
 	if (port >= LAN_IOSTART && port < LAN_IOEND)
 		_ne_insb(PORT2ADDR_NE(port), addr, count);
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		  ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		unsigned char *buf = addr;
 		unsigned char *portp = __port2addr_ata(port);
 		while (count--)
@@ -289,8 +306,9 @@ void _insw(unsigned int port, void * addr, unsigned long count)
 		pcc_ioread_word(9, port, (void *)addr, sizeof(unsigned short),
 				count, 1);
 #endif
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	} else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		    ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		portp = __port2addr_ata(port);
 		while (count--)
 			*buf++ = *(volatile unsigned short *)portp;
@@ -321,8 +339,9 @@ void _outsb(unsigned int port, const void * addr, unsigned long count)
 		portp = PORT2ADDR_NE(port);
 		while (count--)
 			_ne_outb(*buf++, portp);
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	} else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		    ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		portp = __port2addr_ata(port);
 		while (count--)
 			*(volatile unsigned char *)portp = *buf++;
@@ -348,8 +367,9 @@ void _outsw(unsigned int port, const void * addr, unsigned long count)
 		portp = PORT2ADDR_NE(port);
 		while (count--)
 			*(volatile unsigned short *)portp = *buf++;
-#if defined(CONFIG_IDE) && !defined(CONFIG_M32R_CFC)
-	} else if ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) {
+#if defined(CONFIG_IDE)
+	} else if ( ((port >= 0x170 && port <=0x177) || port == 0x376) ||
+		    ((port >= 0x1f0 && port <=0x1f7) || port == 0x3f6) ){
 		portp = __port2addr_ata(port);
 		while (count--)
 			*(volatile unsigned short *)portp = *buf++;
diff --git a/arch/m32r/kernel/setup_mappi3.c b/arch/m32r/kernel/setup_mappi3.c
index 9c79341a7b45..f6ecdf7f555c 100644
--- a/arch/m32r/kernel/setup_mappi3.c
+++ b/arch/m32r/kernel/setup_mappi3.c
@@ -151,7 +151,7 @@ void __init init_IRQ(void)
 	disable_mappi3_irq(M32R_IRQ_INT1);
 #endif /* CONFIG_USB */
 
-	/* ICUCR40: CFC IREQ */
+	/* CFC IREQ */
 	irq_desc[PLD_IRQ_CFIREQ].status = IRQ_DISABLED;
 	irq_desc[PLD_IRQ_CFIREQ].handler = &mappi3_irq_type;
 	irq_desc[PLD_IRQ_CFIREQ].action = 0;
@@ -160,7 +160,7 @@ void __init init_IRQ(void)
 	disable_mappi3_irq(PLD_IRQ_CFIREQ);
 
 #if defined(CONFIG_M32R_CFC)
-	/* ICUCR41: CFC Insert */
+	/* ICUCR41: CFC Insert & eject */
 	irq_desc[PLD_IRQ_CFC_INSERT].status = IRQ_DISABLED;
 	irq_desc[PLD_IRQ_CFC_INSERT].handler = &mappi3_irq_type;
 	irq_desc[PLD_IRQ_CFC_INSERT].action = 0;
@@ -168,14 +168,16 @@ void __init init_IRQ(void)
 	icu_data[PLD_IRQ_CFC_INSERT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD00;
 	disable_mappi3_irq(PLD_IRQ_CFC_INSERT);
 
-	/* ICUCR42: CFC Eject */
-	irq_desc[PLD_IRQ_CFC_EJECT].status = IRQ_DISABLED;
-	irq_desc[PLD_IRQ_CFC_EJECT].handler = &mappi3_irq_type;
-	irq_desc[PLD_IRQ_CFC_EJECT].action = 0;
-	irq_desc[PLD_IRQ_CFC_EJECT].depth = 1;	/* disable nested irq */
-	icu_data[PLD_IRQ_CFC_EJECT].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
-	disable_mappi3_irq(PLD_IRQ_CFC_EJECT);
 #endif /* CONFIG_M32R_CFC */
+
+	/* IDE IREQ */
+	irq_desc[PLD_IRQ_IDEIREQ].status = IRQ_DISABLED;
+	irq_desc[PLD_IRQ_IDEIREQ].handler = &mappi3_irq_type;
+	irq_desc[PLD_IRQ_IDEIREQ].action = 0;
+	irq_desc[PLD_IRQ_IDEIREQ].depth = 1;	/* disable nested irq */
+	icu_data[PLD_IRQ_IDEIREQ].icucr = M32R_ICUCR_IEN|M32R_ICUCR_ISMOD10;
+	disable_mappi3_irq(PLD_IRQ_IDEIREQ);
+
 }
 
 #if defined(CONFIG_SMC91X)
diff --git a/arch/m32r/kernel/sys_m32r.c b/arch/m32r/kernel/sys_m32r.c
index e0500e12c5fb..fe55b28d3725 100644
--- a/arch/m32r/kernel/sys_m32r.c
+++ b/arch/m32r/kernel/sys_m32r.c
@@ -41,7 +41,8 @@ asmlinkage int sys_tas(int *addr)
 		return -EFAULT;
 	local_irq_save(flags);
 	oldval = *addr;
-	*addr = 1;
+	if (!oldval)
+		*addr = 1;
 	local_irq_restore(flags);
 	return oldval;
 }
@@ -59,7 +60,8 @@ asmlinkage int sys_tas(int *addr)
 
 	_raw_spin_lock(&tas_lock);
 	oldval = *addr;
-	*addr = 1;
+	if (!oldval)
+		*addr = 1;
 	_raw_spin_unlock(&tas_lock);
 
 	return oldval;
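Both sys_tas() hunks above make the store conditional, restoring test-and-set semantics: return the old value, and write 1 only when the word was free, so a caller that keeps some value other than 0 or 1 in the lock word no longer has it silently replaced. An illustrative model of the intended operation; the real syscall performs this as one atomic step, under local_irq_save() or the tas_lock spinlock as shown:

    /* what sys_tas(addr) is expected to do, atomically */
    static int tas_model(int *addr)
    {
    	int oldval = *addr;

    	if (!oldval)		/* claim the lock only if it was free (0) */
    		*addr = 1;
    	return oldval;		/* the caller always sees the previous value */
    }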
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index 98f67c78d1bd..a13eb575f834 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -61,15 +61,17 @@ endif
 LDFLAGS_vmlinux	:= -Bstatic
 
 # The -Iarch/$(ARCH)/include is temporary while we are merging
-CPPFLAGS	+= -Iarch/$(ARCH) -Iarch/$(ARCH)/include
-AFLAGS		+= -Iarch/$(ARCH)
-CFLAGS		+= -Iarch/$(ARCH) -msoft-float -pipe
+CPPFLAGS-$(CONFIG_PPC32) := -Iarch/$(ARCH) -Iarch/$(ARCH)/include
+AFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH)
 CFLAGS-$(CONFIG_PPC64)	:= -mminimal-toc -mtraceback=none -mcall-aixdesc
-CFLAGS-$(CONFIG_PPC32)	:= -ffixed-r2 -mmultiple
-CFLAGS		+= $(CFLAGS-y)
+CFLAGS-$(CONFIG_PPC32)	:= -Iarch/$(ARCH) -ffixed-r2 -mmultiple
+CPPFLAGS	+= $(CPPFLAGS-y)
+AFLAGS		+= $(AFLAGS-y)
+CFLAGS		+= -msoft-float -pipe $(CFLAGS-y)
 CPP		= $(CC) -E $(CFLAGS)
 # Temporary hack until we have migrated to asm-powerpc
-LINUXINCLUDE	+= -Iarch/$(ARCH)/include
+LINUXINCLUDE-$(CONFIG_PPC32)	:= -Iarch/$(ARCH)/include
+LINUXINCLUDE	+= $(LINUXINCLUDE-y)
 
 CHECKFLAGS	+= -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
 
@@ -173,11 +175,13 @@ archclean:
 
 archprepare: checkbin
 
+ifeq ($(CONFIG_PPC32),y)
 # Temporary hack until we have migrated to asm-powerpc
 include/asm: arch/$(ARCH)/include/asm
 arch/$(ARCH)/include/asm: FORCE
 	$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
 	$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
+endif
 
 # Use the file '.tmp_gas_check' for binutils tests, as gas won't output
 # to stdout and these checks are run even on install targets.
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index b44b36e0c293..f0c47dab0903 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -145,8 +145,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 		struct page *pg = virt_to_page(vdso32_kbase +
 					       i*PAGE_SIZE);
 		struct page *upg = (vma && vma->vm_mm) ?
-			follow_page(vma->vm_mm, vma->vm_start +
-				    i*PAGE_SIZE, 0)
+			follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 			: NULL;
 		dump_one_vdso_page(pg, upg);
 	}
@@ -157,8 +156,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma)
 		struct page *pg = virt_to_page(vdso64_kbase +
 					       i*PAGE_SIZE);
 		struct page *upg = (vma && vma->vm_mm) ?
-			follow_page(vma->vm_mm, vma->vm_start +
-				    i*PAGE_SIZE, 0)
+			follow_page(vma, vma->vm_start + i*PAGE_SIZE, 0)
 			: NULL;
 		dump_one_vdso_page(pg, upg);
 	}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f867bba893ca..6bc9dbad7dea 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -295,7 +295,7 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len)
 	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					LOW_ESID_MASK(addr, len));
-	if ((addr + len) >= 0x100000000UL)
+	if ((addr + len) > 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					HTLB_AREA_MASK(addr, len));
 	if (err) {
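The prepare_hugepage_range() hunk fixes an off-by-one at the 4GB boundary: [addr, addr + len) is half-open, so a mapping whose arithmetic end is exactly 0x100000000UL has its last byte at 0xffffffff and never touches the high areas. A worked check (illustrative helper, not the kernel function):

    /* the end address of [addr, addr + len) is exclusive */
    static int touches_high_area(unsigned long addr, unsigned long len)
    {
    	return (addr + len) > 0x100000000UL;
    }

    /* e.g. addr = 0xfff00000UL, len = 0x100000UL: addr + len == 0x100000000UL.
     * The last byte is 0xffffffff, entirely below 4GB, so this returns 0;
     * the old '>=' test wrongly opened the high huge-page areas for it. */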
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
index bf081b345820..2b54eeb2c899 100644
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -3,7 +3,7 @@
  *
  * Rewrite, cleanup:
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, iSeries-specific parts.
  *
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 97ba5214417f..c78f2b290a73 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -5,7 +5,7 @@
  *
  * Rewrite, cleanup:
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
  *
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h
index ea8f0d9eed8a..33ed9ed7fc1e 100644
--- a/arch/powerpc/sysdev/dart.h
+++ b/arch/powerpc/sysdev/dart.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index f32baf7f4693..5c1a26a6d00c 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -1,11 +1,11 @@
 /*
  * arch/powerpc/sysdev/u3_iommu.c
  *
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Based on pSeries_iommu.c:
  * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
- * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
+ * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
  *
  * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu.
  *
diff --git a/arch/sparc/mm/generic.c b/arch/sparc/mm/generic.c
index 0410bae681f8..2cb0728cee05 100644
--- a/arch/sparc/mm/generic.c
+++ b/arch/sparc/mm/generic.c
@@ -32,9 +32,7 @@ static inline void io_remap_pte_range(struct mm_struct *mm, pte_t * pte, unsigne
 	if (end > PMD_SIZE)
 		end = PMD_SIZE;
 	do {
-		pte_t oldpage = *pte;
-		pte_clear(mm, address, pte);
-		set_pte(pte, mk_pte_io(offset, prot, space));
+		set_pte_at(mm, address, pte, mk_pte_io(offset, prot, space));
 		address += PAGE_SIZE;
 		offset += PAGE_SIZE;
 		pte++;
@@ -63,7 +61,7 @@ static inline int io_remap_pmd_range(struct mm_struct *mm, pmd_t * pmd, unsigned
 }
 
 int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
-			unsigned long pfn, unsigned long size, pgprot_t prot)
+		       unsigned long pfn, unsigned long size, pgprot_t prot)
 {
 	int error = 0;
 	pgd_t * dir;
@@ -74,7 +72,9 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
 
 	/* See comment in mm/memory.c remap_pfn_range */
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_pgoff = (offset >> PAGE_SHIFT) |
+					((unsigned long)space << 28UL);
 
 	prot = __pgprot(pg_iobits);
 	offset -= from;
diff --git a/arch/sparc64/mm/generic.c b/arch/sparc64/mm/generic.c
index 8fd4cb1f050a..d9396c1721cd 100644
--- a/arch/sparc64/mm/generic.c
+++ b/arch/sparc64/mm/generic.c
@@ -15,6 +15,15 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
+static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
+{
+	pte_t pte;
+	pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) &
+			~(unsigned long)_PAGE_CACHE);
+	pte_val(pte) |= (((unsigned long)space) << 32);
+	return pte;
+}
+
 /* Remap IO memory, the same way as remap_pfn_range(), but use
  * the obio memory space.
  *
@@ -126,9 +135,13 @@ int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
 	struct mm_struct *mm = vma->vm_mm;
 	int space = GET_IOSPACE(pfn);
 	unsigned long offset = GET_PFN(pfn) << PAGE_SHIFT;
+	unsigned long phys_base;
+
+	phys_base = offset | (((unsigned long) space) << 32UL);
 
 	/* See comment in mm/memory.c remap_pfn_range */
-	vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
+	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
+	vma->vm_pgoff = phys_base >> PAGE_SHIFT;
 
 	prot = __pgprot(pg_iobits);
 	offset -= from;
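The sparc and sparc64 hunks convert io_remap_pfn_range() from VM_UNPAGED to VM_PFNMAP. Under VM_PFNMAP the VM reconstructs the physical frame of any page in the VMA from vma->vm_pgoff, so the full bus address, including the I/O space number in the high bits, must be encoded there up front (space << 28 on 32-bit sparc, space << 32 on sparc64, as the hunks show). A sketch of the sparc64 encoding, with an illustrative page-shift constant:

    #define SKETCH_PAGE_SHIFT 13	/* sparc64 uses 8KB pages; illustrative */

    /* the vm_pgoff that makes pfn lookups land on the right bus address */
    static unsigned long io_pgoff(unsigned long offset, int space)
    {
    	unsigned long phys_base = offset | (((unsigned long) space) << 32UL);

    	return phys_base >> SKETCH_PAGE_SHIFT;
    }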
diff --git a/drivers/char/drm/drm_lock.c b/drivers/char/drm/drm_lock.c
index b276ae8a6633..b48a595d54ec 100644
--- a/drivers/char/drm/drm_lock.c
+++ b/drivers/char/drm/drm_lock.c
@@ -104,6 +104,10 @@ int drm_lock(struct inode *inode, struct file *filp,
 	__set_current_state(TASK_RUNNING);
 	remove_wait_queue(&dev->lock.lock_queue, &entry);
 
+	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+	if (ret)
+		return ret;
+
 	sigemptyset(&dev->sigmask);
 	sigaddset(&dev->sigmask, SIGSTOP);
 	sigaddset(&dev->sigmask, SIGTSTP);
@@ -116,8 +120,9 @@ int drm_lock(struct inode *inode, struct file *filp,
 	if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
 		dev->driver->dma_ready(dev);
 
-	if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT))
-		return dev->driver->dma_quiescent(dev);
+	if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
+		if (dev->driver->dma_quiescent(dev)) {
+			DRM_DEBUG("%d waiting for DMA quiescent\n", lock.context);
+			return DRM_ERR(EBUSY);
+		}
+	}
 
 	/* dev->driver->kernel_context_switch isn't used by any of the x86
 	 * drivers but is used by the Sparc driver.
@@ -128,9 +136,7 @@ int drm_lock(struct inode *inode, struct file *filp,
 		dev->driver->kernel_context_switch(dev, dev->last_context,
 						   lock.context);
 	}
-	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
-
-	return ret;
+	return 0;
 }
 
 /**
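Two behavioural fixes in drm_lock() above: an interrupted wait now returns immediately, before the signal mask and DMA state are touched, and the raw return value of the driver's dma_quiescent() hook is no longer passed out as the ioctl result; any failure is reported uniformly as EBUSY. A sketch of the second pattern, with a bare function pointer standing in for the DRM driver hook:

    #include <errno.h>

    /* map any nonzero hook result onto one documented errno instead of
     * leaking the driver's private return value to user space */
    static int quiesce(int (*dma_quiescent)(void))
    {
    	if (dma_quiescent && dma_quiescent())
    		return -EBUSY;
    	return 0;
    }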
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 29c3b631445a..91dd669273e0 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -591,7 +591,7 @@ static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
 
 		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
 			goto out_up;
-		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB | VM_UNPAGED))
+		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
 			break;
 		count = vma->vm_end - addr;
 		if (count > size)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1c0f62d0f938..815902c2c856 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1113,21 +1113,13 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 {
 	int retval = -EINVAL;
 
-	/*
-	 * If we are already in context of hotplug thread, we dont need to
-	 * acquire the hotplug lock. Otherwise acquire cpucontrol to prevent
-	 * hotplug from removing this cpu that we are working on.
-	 */
-	if (!current_in_cpu_hotplug())
-		lock_cpu_hotplug();
-
+	lock_cpu_hotplug();
 	dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
 		target_freq, relation);
 	if (cpu_online(policy->cpu) && cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
-	if (!current_in_cpu_hotplug())
-		unlock_cpu_hotplug();
+	unlock_cpu_hotplug();
 
 	return retval;
 }
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 41d6b4017acb..d393b504bf26 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -355,9 +355,9 @@ error4:
 	spin_unlock_irqrestore(&port_priv->reg_lock, flags);
 	kfree(reg_req);
 error3:
-	kfree(mad_agent_priv);
-error2:
 	ib_dereg_mr(mad_agent_priv->agent.mr);
+error2:
+	kfree(mad_agent_priv);
 error1:
 	return ret;
 }
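The mad.c hunk fixes a use-after-free in the error unwind: the old path ran kfree(mad_agent_priv) at error3 and then dereferenced the freed structure to call ib_dereg_mr(). Reordering the label bodies restores release-in-reverse-order. The general shape, sketched with stand-in helpers rather than the InfiniBand API:

    #include <stdlib.h>

    struct agent { void *mr; };

    static void dereg_mr(void *mr) { (void)mr; }	/* stand-in for ib_dereg_mr() */

    static int unwind(struct agent *priv, int ret)
    {
    	dereg_mr(priv->mr);	/* still uses priv: must run first */
    	free(priv);		/* priv is dead from this point on */
    	return ret;
    }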
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 78c7418478d6..cd12fca73b0d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1028,7 +1028,6 @@ static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
 		mddev->size = le64_to_cpu(sb->size)/2;
 		mddev->events = le64_to_cpu(sb->events);
 		mddev->bitmap_offset = 0;
-		mddev->default_bitmap_offset = 0;
 		mddev->default_bitmap_offset = 1024;
 
 		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
@@ -2932,6 +2931,9 @@ static int set_array_info(mddev_t * mddev, mdu_array_info_t *info)
 
 	mddev->sb_dirty      = 1;
 
+	mddev->default_bitmap_offset = MD_SB_BYTES >> 9;
+	mddev->bitmap_offset = 0;
+
 	/*
 	 * Generate a 128 bit UUID
 	 */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 2da9d3ba902d..3066c587b539 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -953,9 +953,6 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 	int mirror = 0;
 	mirror_info_t *p;
 
-	if (rdev->saved_raid_disk >= 0 &&
-	    conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
-		mirror = rdev->saved_raid_disk;
 	for (mirror=0; mirror < mddev->raid_disks; mirror++)
 		if ( !(p=conf->mirrors+mirror)->rdev) {
 
@@ -972,7 +969,10 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
 			found = 1;
-			if (rdev->saved_raid_disk != mirror)
+			/* As all devices are equivalent, we don't need a full recovery
+			 * if this was recently any drive of the array
+			 */
+			if (rdev->saved_raid_disk < 0)
 				conf->fullsync = 1;
 			rcu_assign_pointer(p->rdev, rdev);
 			break;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 867f06ae33d9..713dc9c2c730 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -552,7 +552,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		    !test_bit(In_sync, &rdev->flags))
 			continue;
 
-		if (!atomic_read(&rdev->nr_pending)) {
+		/* This optimisation is debatable, and completely destroys
+		 * sequential read speed for 'far copies' arrays.  So only
+		 * keep it for 'near' arrays, and review those later.
+		 */
+		if (conf->near_copies > 1 && !atomic_read(&rdev->nr_pending)) {
 			disk = ndisk;
 			slot = nslot;
 			break;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e2a40283e323..36d5f8ac8265 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1704,7 +1704,9 @@ static void raid5d (mddev_t *mddev)
 
 		if (conf->seq_flush - conf->seq_write > 0) {
 			int seq = conf->seq_flush;
+			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
+			spin_lock_irq(&conf->device_lock);
 			conf->seq_write = seq;
 			activate_bit_delay(conf);
 		}
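The raid5d() hunk (and its raid6 twin below) exists because bitmap_unplug() can block on bitmap I/O and was being called with conf->device_lock held; sleeping inside a spinlocked section can stall the whole array. The fix drops the lock around the blocking call and retakes it before writing conf->seq_write. The shape of the pattern, assuming <linux/spinlock.h> and with a stub standing in for the blocking call:

    static void bitmap_unplug_stub(void) { /* stands in for bitmap_unplug(); may sleep */ }

    /* caller holds 'lock' on entry and gets it back on exit */
    static void flush_pending(spinlock_t *lock, int *seq_write, int seq)
    {
    	spin_unlock_irq(lock);	/* must not sleep while holding the lock */
    	bitmap_unplug_stub();
    	spin_lock_irq(lock);	/* retake before writing guarded state */
    	*seq_write = seq;
    }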
diff --git a/drivers/md/raid6main.c b/drivers/md/raid6main.c
index eae5a35629c5..0000d162d198 100644
--- a/drivers/md/raid6main.c
+++ b/drivers/md/raid6main.c
@@ -1702,6 +1702,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	int data_disks = raid_disks - 2;
 	sector_t max_sector = mddev->size << 1;
 	int sync_blocks;
+	int still_degraded = 0;
+	int i;
 
 	if (sector_nr >= max_sector) {
 		/* just being told to finish up .. nothing much to do */
@@ -1710,7 +1712,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		if (mddev->curr_resync < max_sector) /* aborted */
 			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
 					&sync_blocks, 1);
-		else /* compelted sync */
+		else /* completed sync */
 			conf->fullsync = 0;
 		bitmap_close_sync(mddev->bitmap);
 
@@ -1748,7 +1750,16 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 		 */
 		schedule_timeout_uninterruptible(1);
 	}
-	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 0);
+	/* Need to check if array will still be degraded after recovery/resync
+	 * We don't need to check the 'failed' flag as when that gets set,
+	 * recovery aborts.
+	 */
+	for (i=0; i<mddev->raid_disks; i++)
+		if (conf->disks[i].rdev == NULL)
+			still_degraded = 1;
+
+	bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);
+
 	spin_lock(&sh->lock);
 	set_bit(STRIPE_SYNCING, &sh->state);
 	clear_bit(STRIPE_INSYNC, &sh->state);
@@ -1784,7 +1795,9 @@ static void raid6d (mddev_t *mddev)
 
 		if (conf->seq_flush - conf->seq_write > 0) {
 			int seq = conf->seq_flush;
+			spin_unlock_irq(&conf->device_lock);
 			bitmap_unplug(mddev->bitmap);
+			spin_lock_irq(&conf->device_lock);
 			conf->seq_write = seq;
 			activate_bit_delay(conf);
 		}
@@ -2145,9 +2158,15 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 		/* no point adding a device */
 		return 0;
 	/*
-	 * find the disk ...
+	 * find the disk ... but prefer rdev->saved_raid_disk
+	 * if possible.
 	 */
-	for (disk=0; disk < mddev->raid_disks; disk++)
+	if (rdev->saved_raid_disk >= 0 &&
+	    conf->disks[rdev->saved_raid_disk].rdev == NULL)
+		disk = rdev->saved_raid_disk;
+	else
+		disk = 0;
+	for ( ; disk < mddev->raid_disks; disk++)
 		if ((p=conf->disks + disk)->rdev == NULL) {
 			clear_bit(In_sync, &rdev->flags);
 			rdev->raid_disk = disk;
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index 1a3b3c7e5e99..ecb9a31dd003 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -26,7 +26,7 @@ config VIDEO_BT848
 	  module will be called bttv.
 
 config VIDEO_BT848_DVB
-	tristate "DVB/ATSC Support for bt878 based TV cards"
+	bool "DVB/ATSC Support for bt878 based TV cards"
 	depends on VIDEO_BT848 && DVB_CORE
 	select DVB_BT8XX
 	---help---
diff --git a/drivers/media/video/cx88/Kconfig b/drivers/media/video/cx88/Kconfig
index 41818b6205b3..85ba4106dc79 100644
--- a/drivers/media/video/cx88/Kconfig
+++ b/drivers/media/video/cx88/Kconfig
@@ -46,8 +46,8 @@ config VIDEO_CX88_DVB_ALL_FRONTENDS
 	  If you are unsure, choose Y.
 
 config VIDEO_CX88_DVB_MT352
-	tristate "Zarlink MT352 DVB-T Support"
-	default m
+	bool "Zarlink MT352 DVB-T Support"
+	default y
 	depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
 	select DVB_MT352
 	---help---
@@ -55,8 +55,8 @@ config VIDEO_CX88_DVB_MT352
 	  Connexant 2388x chip and the MT352 demodulator.
 
 config VIDEO_CX88_DVB_OR51132
-	tristate "OR51132 ATSC Support"
-	default m
+	bool "OR51132 ATSC Support"
+	default y
 	depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
 	select DVB_OR51132
 	---help---
@@ -64,8 +64,8 @@ config VIDEO_CX88_DVB_OR51132
 	  Connexant 2388x chip and the OR51132 demodulator.
 
 config VIDEO_CX88_DVB_CX22702
-	tristate "Conexant CX22702 DVB-T Support"
-	default m
+	bool "Conexant CX22702 DVB-T Support"
+	default y
 	depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
 	select DVB_CX22702
 	---help---
@@ -73,8 +73,8 @@ config VIDEO_CX88_DVB_CX22702
 	  Connexant 2388x chip and the CX22702 demodulator.
 
 config VIDEO_CX88_DVB_LGDT330X
-	tristate "LG Electronics DT3302/DT3303 ATSC Support"
-	default m
+	bool "LG Electronics DT3302/DT3303 ATSC Support"
+	default y
 	depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
 	select DVB_LGDT330X
 	---help---
@@ -82,8 +82,8 @@ config VIDEO_CX88_DVB_LGDT330X
 	  Connexant 2388x chip and the LGDT3302/LGDT3303 demodulator.
 
 config VIDEO_CX88_DVB_NXT200X
-	tristate "NXT2002/NXT2004 ATSC Support"
-	default m
+	bool "NXT2002/NXT2004 ATSC Support"
+	default y
 	depends on VIDEO_CX88_DVB && !VIDEO_CX88_DVB_ALL_FRONTENDS
 	select DVB_NXT200X
 	---help---
diff --git a/drivers/media/video/cx88/Makefile b/drivers/media/video/cx88/Makefile
index 0df40b773454..54401b02b7ce 100644
--- a/drivers/media/video/cx88/Makefile
+++ b/drivers/media/video/cx88/Makefile
@@ -9,21 +9,12 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
 EXTRA_CFLAGS += -I$(src)/..
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
-ifneq ($(CONFIG_VIDEO_BUF_DVB),n)
- EXTRA_CFLAGS += -DHAVE_VIDEO_BUF_DVB=1
-endif
-ifneq ($(CONFIG_DVB_CX22702),n)
- EXTRA_CFLAGS += -DHAVE_CX22702=1
-endif
-ifneq ($(CONFIG_DVB_OR51132),n)
- EXTRA_CFLAGS += -DHAVE_OR51132=1
-endif
-ifneq ($(CONFIG_DVB_LGDT330X),n)
- EXTRA_CFLAGS += -DHAVE_LGDT330X=1
-endif
-ifneq ($(CONFIG_DVB_MT352),n)
- EXTRA_CFLAGS += -DHAVE_MT352=1
-endif
-ifneq ($(CONFIG_DVB_NXT200X),n)
- EXTRA_CFLAGS += -DHAVE_NXT200X=1
-endif
+
+extra-cflags-$(CONFIG_VIDEO_BUF_DVB) += -DHAVE_VIDEO_BUF_DVB=1
+extra-cflags-$(CONFIG_DVB_CX22702)   += -DHAVE_CX22702=1
+extra-cflags-$(CONFIG_DVB_OR51132)   += -DHAVE_OR51132=1
+extra-cflags-$(CONFIG_DVB_LGDT330X)  += -DHAVE_LGDT330X=1
+extra-cflags-$(CONFIG_DVB_MT352)     += -DHAVE_MT352=1
+extra-cflags-$(CONFIG_DVB_NXT200X)   += -DHAVE_NXT200X=1
+
+EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/media/video/saa7134/Kconfig b/drivers/media/video/saa7134/Kconfig
index 7bdeabe638ca..c512c4411b38 100644
--- a/drivers/media/video/saa7134/Kconfig
+++ b/drivers/media/video/saa7134/Kconfig
@@ -42,8 +42,8 @@ config VIDEO_SAA7134_DVB_ALL_FRONTENDS
 	  If you are unsure, choose Y.
 
 config VIDEO_SAA7134_DVB_MT352
-	tristate "Zarlink MT352 DVB-T Support"
-	default m
+	bool "Zarlink MT352 DVB-T Support"
+	default y
 	depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
 	select DVB_MT352
 	---help---
@@ -51,8 +51,8 @@ config VIDEO_SAA7134_DVB_MT352
 	  Philips saa7134 chip and the MT352 demodulator.
 
 config VIDEO_SAA7134_DVB_TDA1004X
-	tristate "Phillips TDA10045H/TDA10046H DVB-T Support"
-	default m
+	bool "Phillips TDA10045H/TDA10046H DVB-T Support"
+	default y
 	depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
 	select DVB_TDA1004X
 	---help---
@@ -60,8 +60,8 @@ config VIDEO_SAA7134_DVB_TDA1004X
 	  Philips saa7134 chip and the TDA10045H/TDA10046H demodulator.
 
 config VIDEO_SAA7134_DVB_NXT200X
-	tristate "NXT2002/NXT2004 ATSC Support"
-	default m
+	bool "NXT2002/NXT2004 ATSC Support"
+	default y
 	depends on VIDEO_SAA7134_DVB && !VIDEO_SAA7134_DVB_ALL_FRONTENDS
 	select DVB_NXT200X
 	---help---
diff --git a/drivers/media/video/saa7134/Makefile b/drivers/media/video/saa7134/Makefile
index 4226b61cc613..134f83a96218 100644
--- a/drivers/media/video/saa7134/Makefile
+++ b/drivers/media/video/saa7134/Makefile
@@ -11,15 +11,10 @@ obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o
 EXTRA_CFLAGS += -I$(src)/..
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -I$(srctree)/drivers/media/dvb/frontends
-ifneq ($(CONFIG_VIDEO_BUF_DVB),n)
- EXTRA_CFLAGS += -DHAVE_VIDEO_BUF_DVB=1
-endif
-ifneq ($(CONFIG_DVB_MT352),n)
- EXTRA_CFLAGS += -DHAVE_MT352=1
-endif
-ifneq ($(CONFIG_DVB_TDA1004X),n)
- EXTRA_CFLAGS += -DHAVE_TDA1004X=1
-endif
-ifneq ($(CONFIG_DVB_NXT200X),n)
- EXTRA_CFLAGS += -DHAVE_NXT200X=1
-endif
+
+extra-cflags-$(CONFIG_VIDEO_BUF_DVB) += -DHAVE_VIDEO_BUF_DVB=1
+extra-cflags-$(CONFIG_DVB_MT352)     += -DHAVE_MT352=1
+extra-cflags-$(CONFIG_DVB_TDA1004X)  += -DHAVE_TDA1004X=1
+extra-cflags-$(CONFIG_DVB_NXT200X)   += -DHAVE_NXT200X=1
+
+EXTRA_CFLAGS += $(extra-cflags-y) $(extra-cflags-m)
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index 66c03e882570..81ef306cb124 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -421,8 +421,8 @@ static int __devinit i2o_pci_probe(struct pci_dev *pdev,
 	i2o_pci_free(c);
 
 free_controller:
-	i2o_iop_free(c);
 	put_device(c->device.parent);
+	i2o_iop_free(c);
 
 disable:
 	pci_disable_device(pdev);
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 2c22b4b3619d..078579ae6359 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -355,9 +355,10 @@ static void add_pcc_socket(ulong base, int irq, ulong mapaddr, kio_addr_t ioaddr
 #ifndef CONFIG_PLAT_USRV
 	/* insert interrupt */
 	request_irq(irq, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
+#ifndef CONFIG_PLAT_MAPPI3
 	/* eject interrupt */
 	request_irq(irq+1, pcc_interrupt, 0, "m32r_cfc", pcc_interrupt);
-
+#endif
 	debug(3, "m32r_cfc: enable CFMSK, RDYSEL\n");
 	pcc_set(pcc_sockets, (unsigned int)PLD_CFIMASK, 0x01);
 #endif	/* CONFIG_PLAT_USRV */
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index c28e3aea1c3c..418fc7b896ac 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -816,7 +816,7 @@ static int adpt_hba_reset(adpt_hba* pHba)
 static void adpt_i2o_sys_shutdown(void)
 {
 	 adpt_hba *pHba, *pNext;
-	 struct adpt_i2o_post_wait_data *p1, *p2;
+	 struct adpt_i2o_post_wait_data *p1, *old;
 
 	 printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
 	 printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
@@ -830,13 +830,14 @@ static void adpt_i2o_sys_shutdown(void)
 	 }
 
 	 /* Remove any timedout entries from the wait queue.  */
-	 p2 = NULL;
 //	 spin_lock_irqsave(&adpt_post_wait_lock, flags);
 	 /* Nothing should be outstanding at this point so just
 	  * free them
 	  */
-	 for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p2->next) {
-		 kfree(p1);
+	 for(p1 = adpt_post_wait_queue; p1;) {
+		 old = p1;
+		 p1 = p1->next;
+		 kfree(old);
 	 }
 //	 spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
 	 adpt_post_wait_queue = NULL;
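The dpt_i2o shutdown loop above freed p1 in the body and then computed p2->next with p2 aliasing the just-freed node, a read from freed memory on every iteration. The replacement caches the node, advances first, then frees. The canonical safe traversal in plain C:

    #include <stdlib.h>

    struct node { struct node *next; };

    static void free_list(struct node *head)
    {
    	struct node *old;

    	while (head) {
    		old = head;		/* remember the node to free */
    		head = head->next;	/* advance while it is still valid */
    		free(old);		/* only now release it */
    	}
    }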
diff --git a/drivers/video/console/fbcon_ccw.c b/drivers/video/console/fbcon_ccw.c
index 3afd1eeb1ade..4952b66ae206 100644
--- a/drivers/video/console/fbcon_ccw.c
+++ b/drivers/video/console/fbcon_ccw.c
@@ -34,7 +34,7 @@ static inline void ccw_update_attr(u8 *dst, u8 *src, int attribute,
 	msk <<= (8 - mod);
 
 	if (offset > mod)
-		set_bit(FBCON_BIT(7), (void *)&msk1);
+		msk1 |= 0x01;
 
 	for (i = 0; i < vc->vc_font.width; i++) {
 		for (j = 0; j < width; j++) {
diff --git a/drivers/video/console/fbcon_rotate.h b/drivers/video/console/fbcon_rotate.h index e504fbf5c604..1b8f92fdc6a8 100644 --- a/drivers/video/console/fbcon_rotate.h +++ b/drivers/video/console/fbcon_rotate.h | |||
| @@ -21,21 +21,13 @@ | |||
| 21 | (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ | 21 | (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ |
| 22 | (i)->var.xres : (i)->var.xres_virtual; }) | 22 | (i)->var.xres : (i)->var.xres_virtual; }) |
| 23 | 23 | ||
| 24 | /* | ||
| 25 | * The bitmap is always big endian | ||
| 26 | */ | ||
| 27 | #if defined(__LITTLE_ENDIAN) | ||
| 28 | #define FBCON_BIT(b) (7 - (b)) | ||
| 29 | #else | ||
| 30 | #define FBCON_BIT(b) (b) | ||
| 31 | #endif | ||
| 32 | 24 | ||
| 33 | static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) | 25 | static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) |
| 34 | { | 26 | { |
| 35 | u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; | 27 | u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; |
| 36 | 28 | ||
| 37 | pat +=index; | 29 | pat +=index; |
| 38 | return (test_bit(FBCON_BIT(bit), (void *)pat)); | 30 | return (*pat) & (0x80 >> bit); |
| 39 | } | 31 | } |
| 40 | 32 | ||
| 41 | static inline void pattern_set_bit(u32 x, u32 y, u32 pitch, char *pat) | 33 | static inline void pattern_set_bit(u32 x, u32 y, u32 pitch, char *pat) |
| @@ -43,7 +35,8 @@ static inline void pattern_set_bit(u32 x, u32 y, u32 pitch, char *pat) | |||
| 43 | u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; | 35 | u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; |
| 44 | 36 | ||
| 45 | pat += index; | 37 | pat += index; |
| 46 | set_bit(FBCON_BIT(bit), (void *)pat); | 38 | |
| 39 | (*pat) |= 0x80 >> bit; | ||
| 47 | } | 40 | } |
| 48 | 41 | ||
| 49 | static inline void rotate_ud(const char *in, char *out, u32 width, u32 height) | 42 | static inline void rotate_ud(const char *in, char *out, u32 width, u32 height) |
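Both fbcon hunks replace set_bit()/test_bit(), whose bit numbering is CPU-endian (hence the old FBCON_BIT() shim), with explicit big-endian byte arithmetic: bit 0 of the font bitmap is the most significant bit of each byte. The two helpers in isolation, kernel context stripped and unsigned char used for clarity:

#include <stdint.h>

static int pattern_test_bit(uint32_t x, uint32_t y, uint32_t pitch,
			    const unsigned char *pat)
{
	uint32_t tmp = y * pitch + x;

	/* MSB-first: bit 0 lives at 0x80, bit 7 at 0x01 */
	return pat[tmp / 8] & (0x80 >> (tmp % 8));
}

static void pattern_set_bit(uint32_t x, uint32_t y, uint32_t pitch,
			    unsigned char *pat)
{
	uint32_t tmp = y * pitch + x;

	pat[tmp / 8] |= 0x80 >> (tmp % 8);
}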
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index be7288184fa9..0ea965c3bb7d 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
| @@ -427,6 +427,8 @@ v9fs_create(struct inode *dir, | |||
| 427 | 427 | ||
| 428 | v9fs_mistat2inode(fcall->params.rstat.stat, file_inode, sb); | 428 | v9fs_mistat2inode(fcall->params.rstat.stat, file_inode, sb); |
| 429 | kfree(fcall); | 429 | kfree(fcall); |
| 430 | fcall = NULL; | ||
| 431 | file_dentry->d_op = &v9fs_dentry_operations; | ||
| 430 | d_instantiate(file_dentry, file_inode); | 432 | d_instantiate(file_dentry, file_inode); |
| 431 | 433 | ||
| 432 | if (perm & V9FS_DMDIR) { | 434 | if (perm & V9FS_DMDIR) { |
diff --git a/fs/dquot.c b/fs/dquot.c index 05b60283c9c2..2a62b3dc20ec 100644 --- a/fs/dquot.c +++ b/fs/dquot.c | |||
| @@ -1513,10 +1513,16 @@ int vfs_quota_on_mount(struct super_block *sb, char *qf_name, | |||
| 1513 | if (IS_ERR(dentry)) | 1513 | if (IS_ERR(dentry)) |
| 1514 | return PTR_ERR(dentry); | 1514 | return PTR_ERR(dentry); |
| 1515 | 1515 | ||
| 1516 | if (!dentry->d_inode) { | ||
| 1517 | error = -ENOENT; | ||
| 1518 | goto out; | ||
| 1519 | } | ||
| 1520 | |||
| 1516 | error = security_quota_on(dentry); | 1521 | error = security_quota_on(dentry); |
| 1517 | if (!error) | 1522 | if (!error) |
| 1518 | error = vfs_quota_on_inode(dentry->d_inode, type, format_id); | 1523 | error = vfs_quota_on_inode(dentry->d_inode, type, format_id); |
| 1519 | 1524 | ||
| 1525 | out: | ||
| 1520 | dput(dentry); | 1526 | dput(dentry); |
| 1521 | return error; | 1527 | return error; |
| 1522 | } | 1528 | } |
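The dquot hunk guards against a negative dentry: the lookup can succeed while the name has no inode behind it, and security_quota_on() would then dereference a NULL d_inode. The control-flow shape of the fix, reduced to a sketch (the struct definitions and the stand-in body are invented for illustration):

#include <errno.h>

struct inode;
struct dentry { struct inode *d_inode; };

static int quota_on_sketch(struct dentry *dentry)
{
	int error;

	if (!dentry->d_inode) {
		error = -ENOENT;	/* name resolved, file absent */
		goto out;
	}
	error = 0;		/* stands in for security_quota_on() etc. */
out:
	/* cleanup (dput() in the real code) runs on every exit path */
	return error;
}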
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 1be78b4b4de9..6104ad310507 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
| @@ -767,6 +767,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 767 | if (input->group != EXT3_SB(sb)->s_groups_count) { | 767 | if (input->group != EXT3_SB(sb)->s_groups_count) { |
| 768 | ext3_warning(sb, __FUNCTION__, | 768 | ext3_warning(sb, __FUNCTION__, |
| 769 | "multiple resizers run on filesystem!\n"); | 769 | "multiple resizers run on filesystem!\n"); |
| 770 | err = -EBUSY; | ||
| 770 | goto exit_journal; | 771 | goto exit_journal; |
| 771 | } | 772 | } |
| 772 | 773 | ||
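The ext3 hunk is a goto-without-errno fix: before it, the "multiple resizers" branch warned and jumped to exit_journal with err still holding its previous value (typically 0), so the refused resize was reported to the caller as success. The pattern to watch for, compressed to its essentials:

	if (conflicting_resizer) {
		warn("multiple resizers run on filesystem!");
		err = -EBUSY;	/* the fix: set the error before bailing out */
		goto exit_journal;
	}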
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c045cc70c749..51f5da652771 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
| @@ -74,6 +74,24 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) | |||
| 74 | return 1; | 74 | return 1; |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | static int dir_alias(struct inode *inode) | ||
| 78 | { | ||
| 79 | if (S_ISDIR(inode->i_mode)) { | ||
| 80 | /* Don't allow creating an alias to a directory */ | ||
| 81 | struct dentry *alias = d_find_alias(inode); | ||
| 82 | if (alias) { | ||
| 83 | dput(alias); | ||
| 84 | return 1; | ||
| 85 | } | ||
| 86 | } | ||
| 87 | return 0; | ||
| 88 | } | ||
| 89 | |||
| 90 | static inline int invalid_nodeid(u64 nodeid) | ||
| 91 | { | ||
| 92 | return !nodeid || nodeid == FUSE_ROOT_ID; | ||
| 93 | } | ||
| 94 | |||
| 77 | static struct dentry_operations fuse_dentry_operations = { | 95 | static struct dentry_operations fuse_dentry_operations = { |
| 78 | .d_revalidate = fuse_dentry_revalidate, | 96 | .d_revalidate = fuse_dentry_revalidate, |
| 79 | }; | 97 | }; |
| @@ -97,7 +115,7 @@ static int fuse_lookup_iget(struct inode *dir, struct dentry *entry, | |||
| 97 | fuse_lookup_init(req, dir, entry, &outarg); | 115 | fuse_lookup_init(req, dir, entry, &outarg); |
| 98 | request_send(fc, req); | 116 | request_send(fc, req); |
| 99 | err = req->out.h.error; | 117 | err = req->out.h.error; |
| 100 | if (!err && (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID)) | 118 | if (!err && invalid_nodeid(outarg.nodeid)) |
| 101 | err = -EIO; | 119 | err = -EIO; |
| 102 | if (!err) { | 120 | if (!err) { |
| 103 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, | 121 | inode = fuse_iget(dir->i_sb, outarg.nodeid, outarg.generation, |
| @@ -193,7 +211,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode, | |||
| 193 | } | 211 | } |
| 194 | 212 | ||
| 195 | err = -EIO; | 213 | err = -EIO; |
| 196 | if (!S_ISREG(outentry.attr.mode)) | 214 | if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid)) |
| 197 | goto out_free_ff; | 215 | goto out_free_ff; |
| 198 | 216 | ||
| 199 | inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, | 217 | inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation, |
| @@ -250,7 +268,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, | |||
| 250 | fuse_put_request(fc, req); | 268 | fuse_put_request(fc, req); |
| 251 | return err; | 269 | return err; |
| 252 | } | 270 | } |
| 253 | if (!outarg.nodeid || outarg.nodeid == FUSE_ROOT_ID) { | 271 | if (invalid_nodeid(outarg.nodeid)) { |
| 254 | fuse_put_request(fc, req); | 272 | fuse_put_request(fc, req); |
| 255 | return -EIO; | 273 | return -EIO; |
| 256 | } | 274 | } |
| @@ -263,7 +281,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_req *req, | |||
| 263 | fuse_put_request(fc, req); | 281 | fuse_put_request(fc, req); |
| 264 | 282 | ||
| 265 | /* Don't allow userspace to do really stupid things... */ | 283 | /* Don't allow userspace to do really stupid things... */ |
| 266 | if ((inode->i_mode ^ mode) & S_IFMT) { | 284 | if (((inode->i_mode ^ mode) & S_IFMT) || dir_alias(inode)) { |
| 267 | iput(inode); | 285 | iput(inode); |
| 268 | return -EIO; | 286 | return -EIO; |
| 269 | } | 287 | } |
| @@ -874,14 +892,9 @@ static struct dentry *fuse_lookup(struct inode *dir, struct dentry *entry, | |||
| 874 | err = fuse_lookup_iget(dir, entry, &inode); | 892 | err = fuse_lookup_iget(dir, entry, &inode); |
| 875 | if (err) | 893 | if (err) |
| 876 | return ERR_PTR(err); | 894 | return ERR_PTR(err); |
| 877 | if (inode && S_ISDIR(inode->i_mode)) { | 895 | if (inode && dir_alias(inode)) { |
| 878 | /* Don't allow creating an alias to a directory */ | 896 | iput(inode); |
| 879 | struct dentry *alias = d_find_alias(inode); | 897 | return ERR_PTR(-EIO); |
| 880 | if (alias) { | ||
| 881 | dput(alias); | ||
| 882 | iput(inode); | ||
| 883 | return ERR_PTR(-EIO); | ||
| 884 | } | ||
| 885 | } | 898 | } |
| 886 | d_add(entry, inode); | 899 | d_add(entry, inode); |
| 887 | return NULL; | 900 | return NULL; |
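The fuse changes are mostly a factoring: the nodeid sanity test and the directory-alias test each appeared in open code at several call sites, and the new helpers keep lookup, create-open and create_new_entry() in agreement (create-open also gains the nodeid check it was missing). The nodeid helper is self-contained enough to show on its own:

#include <stdint.h>

#define FUSE_ROOT_ID 1

/* A nodeid of 0 or of the root is never a valid reply for a freshly
 * created or looked-up entry; treat it as garbage from userspace. */
static inline int invalid_nodeid(uint64_t nodeid)
{
	return !nodeid || nodeid == FUSE_ROOT_ID;
}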
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 6391d8964214..aaab1a5ac461 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
| @@ -643,14 +643,11 @@ static int nfs_show_options(struct seq_file *m, struct vfsmount *mnt) | |||
| 643 | /* | 643 | /* |
| 644 | * Invalidate the local caches | 644 | * Invalidate the local caches |
| 645 | */ | 645 | */ |
| 646 | void | 646 | static void nfs_zap_caches_locked(struct inode *inode) |
| 647 | nfs_zap_caches(struct inode *inode) | ||
| 648 | { | 647 | { |
| 649 | struct nfs_inode *nfsi = NFS_I(inode); | 648 | struct nfs_inode *nfsi = NFS_I(inode); |
| 650 | int mode = inode->i_mode; | 649 | int mode = inode->i_mode; |
| 651 | 650 | ||
| 652 | spin_lock(&inode->i_lock); | ||
| 653 | |||
| 654 | NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode); | 651 | NFS_ATTRTIMEO(inode) = NFS_MINATTRTIMEO(inode); |
| 655 | NFS_ATTRTIMEO_UPDATE(inode) = jiffies; | 652 | NFS_ATTRTIMEO_UPDATE(inode) = jiffies; |
| 656 | 653 | ||
| @@ -659,7 +656,12 @@ nfs_zap_caches(struct inode *inode) | |||
| 659 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; | 656 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; |
| 660 | else | 657 | else |
| 661 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; | 658 | nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; |
| 659 | } | ||
| 662 | 660 | ||
| 661 | void nfs_zap_caches(struct inode *inode) | ||
| 662 | { | ||
| 663 | spin_lock(&inode->i_lock); | ||
| 664 | nfs_zap_caches_locked(inode); | ||
| 663 | spin_unlock(&inode->i_lock); | 665 | spin_unlock(&inode->i_lock); |
| 664 | } | 666 | } |
| 665 | 667 | ||
| @@ -676,16 +678,13 @@ static void nfs_zap_acl_cache(struct inode *inode) | |||
| 676 | } | 678 | } |
| 677 | 679 | ||
| 678 | /* | 680 | /* |
| 679 | * Invalidate, but do not unhash, the inode | 681 | * Invalidate, but do not unhash, the inode. |
| 682 | * NB: must be called with inode->i_lock held! | ||
| 680 | */ | 683 | */ |
| 681 | static void | 684 | static void nfs_invalidate_inode(struct inode *inode) |
| 682 | nfs_invalidate_inode(struct inode *inode) | ||
| 683 | { | 685 | { |
| 684 | umode_t save_mode = inode->i_mode; | 686 | set_bit(NFS_INO_STALE, &NFS_FLAGS(inode)); |
| 685 | 687 | nfs_zap_caches_locked(inode); | |
| 686 | make_bad_inode(inode); | ||
| 687 | inode->i_mode = save_mode; | ||
| 688 | nfs_zap_caches(inode); | ||
| 689 | } | 688 | } |
| 690 | 689 | ||
| 691 | struct nfs_find_desc { | 690 | struct nfs_find_desc { |
| @@ -1528,14 +1527,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr, unsign | |||
| 1528 | printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n", | 1527 | printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n", |
| 1529 | __FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode); | 1528 | __FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode); |
| 1530 | #endif | 1529 | #endif |
| 1530 | out_err: | ||
| 1531 | /* | 1531 | /* |
| 1532 | * No need to worry about unhashing the dentry, as the | 1532 | * No need to worry about unhashing the dentry, as the |
| 1533 | * lookup validation will know that the inode is bad. | 1533 | * lookup validation will know that the inode is bad. |
| 1534 | * (But we fall through to invalidate the caches.) | 1534 | * (But we fall through to invalidate the caches.) |
| 1535 | */ | 1535 | */ |
| 1536 | nfs_invalidate_inode(inode); | 1536 | nfs_invalidate_inode(inode); |
| 1537 | out_err: | ||
| 1538 | set_bit(NFS_INO_STALE, &NFS_FLAGS(inode)); | ||
| 1539 | return -ESTALE; | 1537 | return -ESTALE; |
| 1540 | } | 1538 | } |
| 1541 | 1539 | ||
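The nfs_zap_caches() rework is the standard locked/unlocked split: the body moves into a _locked variant for callers that already hold inode->i_lock (such as nfs_invalidate_inode(), which now also just sets NFS_INO_STALE instead of the make_bad_inode() dance), while the public function keeps taking the lock itself. The idiom in portable form, with a pthread mutex standing in for i_lock:

#include <pthread.h>

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static int cache_validity;

/* Caller must already hold i_lock: the "_locked" convention. */
static void zap_caches_locked(void)
{
	cache_validity = 0;
}

/* Public entry point: acquires the lock around the real work. */
static void zap_caches(void)
{
	pthread_mutex_lock(&i_lock);
	zap_caches_locked();
	pthread_mutex_unlock(&i_lock);
}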
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 21482b2518f6..60e0dd800cc3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -3071,15 +3071,15 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock | |||
| 3071 | struct nfs4_client *clp = state->owner->so_client; | 3071 | struct nfs4_client *clp = state->owner->so_client; |
| 3072 | int status; | 3072 | int status; |
| 3073 | 3073 | ||
| 3074 | down_read(&clp->cl_sem); | ||
| 3075 | /* Is this a delegated open? */ | 3074 | /* Is this a delegated open? */ |
| 3076 | if (test_bit(NFS_DELEGATED_STATE, &state->flags)) { | 3075 | if (NFS_I(state->inode)->delegation_state != 0) { |
| 3077 | /* Yes: cache locks! */ | 3076 | /* Yes: cache locks! */ |
| 3078 | status = do_vfs_lock(request->fl_file, request); | 3077 | status = do_vfs_lock(request->fl_file, request); |
| 3079 | /* ...but avoid races with delegation recall... */ | 3078 | /* ...but avoid races with delegation recall... */ |
| 3080 | if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags)) | 3079 | if (status < 0 || test_bit(NFS_DELEGATED_STATE, &state->flags)) |
| 3081 | goto out; | 3080 | return status; |
| 3082 | } | 3081 | } |
| 3082 | down_read(&clp->cl_sem); | ||
| 3083 | status = nfs4_set_lock_state(state, request); | 3083 | status = nfs4_set_lock_state(state, request); |
| 3084 | if (status != 0) | 3084 | if (status != 0) |
| 3085 | goto out; | 3085 | goto out; |
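The _nfs4_proc_setlk() hunk moves the delegated-open fast path in front of down_read(&clp->cl_sem), so a lock request that can be satisfied from the cached delegation returns without ever taking the client semaphore; presumably this keeps the fast path out of the way of delegation recall, which contends on the same semaphore. The resulting shape, with delegated() and still_delegated() invented to stand in for the NFS_I()/test_bit() checks in the hunk:

	/* Fast path, no cl_sem: service the lock locally if delegated. */
	if (delegated(state)) {
		status = do_vfs_lock(request->fl_file, request);
		/* If it failed, or the delegation survived the race with
		 * recall, we are done; only if the delegation was lost
		 * mid-flight do we fall through to the wire. */
		if (status < 0 || still_delegated(state))
			return status;
	}
	down_read(&clp->cl_sem);
	/* ... on-the-wire locking under the semaphore ... */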
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 0675f3215e0a..5ef4c57618fe 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
| @@ -644,12 +644,15 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f | |||
| 644 | 644 | ||
| 645 | struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter) | 645 | struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter) |
| 646 | { | 646 | { |
| 647 | struct rpc_sequence *sequence = counter->sequence; | ||
| 647 | struct nfs_seqid *new; | 648 | struct nfs_seqid *new; |
| 648 | 649 | ||
| 649 | new = kmalloc(sizeof(*new), GFP_KERNEL); | 650 | new = kmalloc(sizeof(*new), GFP_KERNEL); |
| 650 | if (new != NULL) { | 651 | if (new != NULL) { |
| 651 | new->sequence = counter; | 652 | new->sequence = counter; |
| 652 | INIT_LIST_HEAD(&new->list); | 653 | spin_lock(&sequence->lock); |
| 654 | list_add_tail(&new->list, &sequence->list); | ||
| 655 | spin_unlock(&sequence->lock); | ||
| 653 | } | 656 | } |
| 654 | return new; | 657 | return new; |
| 655 | } | 658 | } |
| @@ -658,12 +661,10 @@ void nfs_free_seqid(struct nfs_seqid *seqid) | |||
| 658 | { | 661 | { |
| 659 | struct rpc_sequence *sequence = seqid->sequence->sequence; | 662 | struct rpc_sequence *sequence = seqid->sequence->sequence; |
| 660 | 663 | ||
| 661 | if (!list_empty(&seqid->list)) { | 664 | spin_lock(&sequence->lock); |
| 662 | spin_lock(&sequence->lock); | 665 | list_del(&seqid->list); |
| 663 | list_del(&seqid->list); | 666 | spin_unlock(&sequence->lock); |
| 664 | spin_unlock(&sequence->lock); | 667 | rpc_wake_up(&sequence->wait); |
| 665 | } | ||
| 666 | rpc_wake_up_next(&sequence->wait); | ||
| 667 | kfree(seqid); | 668 | kfree(seqid); |
| 668 | } | 669 | } |
| 669 | 670 | ||
| @@ -722,11 +723,10 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task) | |||
| 722 | if (sequence->list.next == &seqid->list) | 723 | if (sequence->list.next == &seqid->list) |
| 723 | goto out; | 724 | goto out; |
| 724 | spin_lock(&sequence->lock); | 725 | spin_lock(&sequence->lock); |
| 725 | if (!list_empty(&sequence->list)) { | 726 | if (sequence->list.next != &seqid->list) { |
| 726 | rpc_sleep_on(&sequence->wait, task, NULL, NULL); | 727 | rpc_sleep_on(&sequence->wait, task, NULL, NULL); |
| 727 | status = -EAGAIN; | 728 | status = -EAGAIN; |
| 728 | } else | 729 | } |
| 729 | list_add(&seqid->list, &sequence->list); | ||
| 730 | spin_unlock(&sequence->lock); | 730 | spin_unlock(&sequence->lock); |
| 731 | out: | 731 | out: |
| 732 | return status; | 732 | return status; |
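The seqid changes move list membership to allocation time: every nfs_seqid joins the sequence's FIFO in nfs_alloc_seqid(), so nfs_free_seqid() can unconditionally unlink and wake waiters, and nfs_wait_on_sequence() only has to ask "am I at the head?". A sketch of the post-patch wait logic, with rpc_sleep_on() taken on faith from the hunk:

	status = 0;
	if (sequence->list.next == &seqid->list)	/* already at the head */
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {	/* not our turn yet */
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;		/* caller retries when woken */
	}
	spin_unlock(&sequence->lock);
out:
	return status;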
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 9ab97cef0daa..50bd5a8f0446 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
| @@ -402,12 +402,11 @@ struct numa_maps { | |||
| 402 | /* | 402 | /* |
| 403 | * Calculate numa node maps for a vma | 403 | * Calculate numa node maps for a vma |
| 404 | */ | 404 | */ |
| 405 | static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma) | 405 | static struct numa_maps *get_numa_maps(struct vm_area_struct *vma) |
| 406 | { | 406 | { |
| 407 | int i; | ||
| 407 | struct page *page; | 408 | struct page *page; |
| 408 | unsigned long vaddr; | 409 | unsigned long vaddr; |
| 409 | struct mm_struct *mm = vma->vm_mm; | ||
| 410 | int i; | ||
| 411 | struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL); | 410 | struct numa_maps *md = kmalloc(sizeof(struct numa_maps), GFP_KERNEL); |
| 412 | 411 | ||
| 413 | if (!md) | 412 | if (!md) |
| @@ -420,7 +419,7 @@ static struct numa_maps *get_numa_maps(const struct vm_area_struct *vma) | |||
| 420 | md->node[i] =0; | 419 | md->node[i] =0; |
| 421 | 420 | ||
| 422 | for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { | 421 | for (vaddr = vma->vm_start; vaddr < vma->vm_end; vaddr += PAGE_SIZE) { |
| 423 | page = follow_page(mm, vaddr, 0); | 422 | page = follow_page(vma, vaddr, 0); |
| 424 | if (page) { | 423 | if (page) { |
| 425 | int count = page_mapcount(page); | 424 | int count = page_mapcount(page); |
| 426 | 425 | ||
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 5f82352b97e1..0a044ad98885 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
| @@ -2194,7 +2194,7 @@ static int map_block_for_writepage(struct inode *inode, | |||
| 2194 | INITIALIZE_PATH(path); | 2194 | INITIALIZE_PATH(path); |
| 2195 | int pos_in_item; | 2195 | int pos_in_item; |
| 2196 | int jbegin_count = JOURNAL_PER_BALANCE_CNT; | 2196 | int jbegin_count = JOURNAL_PER_BALANCE_CNT; |
| 2197 | loff_t byte_offset = (block << inode->i_sb->s_blocksize_bits) + 1; | 2197 | loff_t byte_offset = ((loff_t)block << inode->i_sb->s_blocksize_bits)+1; |
| 2198 | int retval; | 2198 | int retval; |
| 2199 | int use_get_block = 0; | 2199 | int use_get_block = 0; |
| 2200 | int bytes_copied = 0; | 2200 | int bytes_copied = 0; |
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index c6108971b4e6..94d3cdfbf9b8 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
| @@ -941,13 +941,12 @@ __linvfs_get_block( | |||
| 941 | int retpbbm = 1; | 941 | int retpbbm = 1; |
| 942 | int error; | 942 | int error; |
| 943 | 943 | ||
| 944 | if (blocks) { | ||
| 945 | offset = blocks << inode->i_blkbits; /* 64 bit goodness */ | ||
| 946 | size = (ssize_t) min_t(xfs_off_t, offset, LONG_MAX); | ||
| 947 | } else { | ||
| 948 | size = 1 << inode->i_blkbits; | ||
| 949 | } | ||
| 950 | offset = (xfs_off_t)iblock << inode->i_blkbits; | 944 | offset = (xfs_off_t)iblock << inode->i_blkbits; |
| 945 | if (blocks) | ||
| 946 | size = (ssize_t) min_t(xfs_off_t, LONG_MAX, | ||
| 947 | (xfs_off_t)blocks << inode->i_blkbits); | ||
| 948 | else | ||
| 949 | size = 1 << inode->i_blkbits; | ||
| 951 | 950 | ||
| 952 | VOP_BMAP(vp, offset, size, | 951 | VOP_BMAP(vp, offset, size, |
| 953 | create ? flags : BMAPI_READ, &iomap, &retpbbm, error); | 952 | create ? flags : BMAPI_READ, &iomap, &retpbbm, error); |
| @@ -1007,7 +1006,7 @@ __linvfs_get_block( | |||
| 1007 | ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0); | 1006 | ASSERT(iomap.iomap_bsize - iomap.iomap_delta > 0); |
| 1008 | offset = min_t(xfs_off_t, | 1007 | offset = min_t(xfs_off_t, |
| 1009 | iomap.iomap_bsize - iomap.iomap_delta, | 1008 | iomap.iomap_bsize - iomap.iomap_delta, |
| 1010 | blocks << inode->i_blkbits); | 1009 | (xfs_off_t)blocks << inode->i_blkbits); |
| 1011 | bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset); | 1010 | bh_result->b_size = (u32) min_t(xfs_off_t, UINT_MAX, offset); |
| 1012 | } | 1011 | } |
| 1013 | 1012 | ||
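The reiserfs and XFS hunks above all fix the same class of bug: a 32-bit block count shifted by the block-size bits is evaluated at 32-bit width and silently truncated before being widened into a 64-bit byte offset. A complete demonstration:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint32_t block = UINT32_C(1) << 21;	/* block 2^21 */
	int bits = 12;				/* 4096-byte blocks */

	/* Bug: the shift wraps at 32 bits, then the result widens. */
	int64_t bad  = (block << bits) + 1;
	/* Fix (what the hunks do): widen first, shift at 64 bits. */
	int64_t good = ((int64_t)block << bits) + 1;

	printf("bad=%" PRId64 " good=%" PRId64 "\n", bad, good);
	return 0;	/* prints bad=1 good=8589934593 */
}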
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c index 35e557b00db2..1c7421840c18 100644 --- a/fs/xfs/xfs_attr_leaf.c +++ b/fs/xfs/xfs_attr_leaf.c | |||
| @@ -310,7 +310,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
| 310 | * Fix up the start offset of the attribute fork | 310 | * Fix up the start offset of the attribute fork |
| 311 | */ | 311 | */ |
| 312 | totsize -= size; | 312 | totsize -= size; |
| 313 | if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname) { | 313 | if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname && |
| 314 | !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) { | ||
| 314 | /* | 315 | /* |
| 315 | * Last attribute now removed, revert to original | 316 | * Last attribute now removed, revert to original |
| 316 | * inode format making all literal area available | 317 | * inode format making all literal area available |
| @@ -328,7 +329,8 @@ xfs_attr_shortform_remove(xfs_da_args_t *args) | |||
| 328 | xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); | 329 | xfs_idata_realloc(dp, -size, XFS_ATTR_FORK); |
| 329 | dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); | 330 | dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize); |
| 330 | ASSERT(dp->i_d.di_forkoff); | 331 | ASSERT(dp->i_d.di_forkoff); |
| 331 | ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname); | 332 | ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname || |
| 333 | (mp->m_flags & XFS_MOUNT_COMPAT_ATTR)); | ||
| 332 | dp->i_afp->if_ext_max = | 334 | dp->i_afp->if_ext_max = |
| 333 | XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); | 335 | XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t); |
| 334 | dp->i_df.if_ext_max = | 336 | dp->i_df.if_ext_max = |
| @@ -737,7 +739,8 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp) | |||
| 737 | + name_loc->namelen | 739 | + name_loc->namelen |
| 738 | + INT_GET(name_loc->valuelen, ARCH_CONVERT); | 740 | + INT_GET(name_loc->valuelen, ARCH_CONVERT); |
| 739 | } | 741 | } |
| 740 | if (bytes == sizeof(struct xfs_attr_sf_hdr)) | 742 | if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) && |
| 743 | (bytes == sizeof(struct xfs_attr_sf_hdr))) | ||
| 741 | return(-1); | 744 | return(-1); |
| 742 | return(xfs_attr_shortform_bytesfit(dp, bytes)); | 745 | return(xfs_attr_shortform_bytesfit(dp, bytes)); |
| 743 | } | 746 | } |
| @@ -775,6 +778,8 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff) | |||
| 775 | goto out; | 778 | goto out; |
| 776 | 779 | ||
| 777 | if (forkoff == -1) { | 780 | if (forkoff == -1) { |
| 781 | ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR)); | ||
| 782 | |||
| 778 | /* | 783 | /* |
| 779 | * Last attribute was removed, revert to original | 784 | * Last attribute was removed, revert to original |
| 780 | * inode format making all literal area available | 785 | * inode format making all literal area available |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index 7ceabd0e2d9d..d1236d6f4045 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
| @@ -550,7 +550,7 @@ xfs_fs_goingdown( | |||
| 550 | struct vfs *vfsp = XFS_MTOVFS(mp); | 550 | struct vfs *vfsp = XFS_MTOVFS(mp); |
| 551 | struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev); | 551 | struct super_block *sb = freeze_bdev(vfsp->vfs_super->s_bdev); |
| 552 | 552 | ||
| 553 | if (sb) { | 553 | if (sb && !IS_ERR(sb)) { |
| 554 | xfs_force_shutdown(mp, XFS_FORCE_UMOUNT); | 554 | xfs_force_shutdown(mp, XFS_FORCE_UMOUNT); |
| 555 | thaw_bdev(sb->s_bdev, sb); | 555 | thaw_bdev(sb->s_bdev, sb); |
| 556 | } | 556 | } |
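freeze_bdev() reports failure with an ERR_PTR()-encoded pointer, not only NULL, so the old `if (sb)` happily dereferenced an error code. The kernel's pointer-encoded errors in miniature, runnable as-is:

#include <stdio.h>

#define MAX_ERRNO 4095

/* After include/linux/err.h: errors live in the top 4095 addresses,
 * which no valid kernel pointer ever occupies. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *sb = ERR_PTR(-5);		/* say, -EIO from a failed freeze */

	if (sb && !IS_ERR(sb))		/* the fixed test */
		puts("safe to dereference sb");
	else
		puts("NULL or error pointer: leave it alone");
	return 0;
}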
diff --git a/fs/xfs/xfs_iomap.h b/fs/xfs/xfs_iomap.h index fcd6d63bb68b..3ce204a524b0 100644 --- a/fs/xfs/xfs_iomap.h +++ b/fs/xfs/xfs_iomap.h | |||
| @@ -69,7 +69,7 @@ typedef struct xfs_iomap { | |||
| 69 | xfs_buftarg_t *iomap_target; | 69 | xfs_buftarg_t *iomap_target; |
| 70 | xfs_off_t iomap_offset; /* offset of mapping, bytes */ | 70 | xfs_off_t iomap_offset; /* offset of mapping, bytes */ |
| 71 | xfs_off_t iomap_bsize; /* size of mapping, bytes */ | 71 | xfs_off_t iomap_bsize; /* size of mapping, bytes */ |
| 72 | size_t iomap_delta; /* offset into mapping, bytes */ | 72 | xfs_off_t iomap_delta; /* offset into mapping, bytes */ |
| 73 | iomap_flags_t iomap_flags; | 73 | iomap_flags_t iomap_flags; |
| 74 | } xfs_iomap_t; | 74 | } xfs_iomap_t; |
| 75 | 75 | ||
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 8f285149681f..4518b188ade6 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h | |||
| @@ -494,10 +494,8 @@ typedef struct log { | |||
| 494 | 494 | ||
| 495 | #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) | 495 | #define XLOG_FORCED_SHUTDOWN(log) ((log)->l_flags & XLOG_IO_ERROR) |
| 496 | 496 | ||
| 497 | #define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ | 497 | #define XLOG_GRANT_SUB_SPACE(log,bytes,type) \ |
| 498 | xlog_grant_sub_space(log,bytes,type) | 498 | { \ |
| 499 | static inline void xlog_grant_sub_space(struct log *log, int bytes, int type) | ||
| 500 | { | ||
| 501 | if (type == 'w') { \ | 499 | if (type == 'w') { \ |
| 502 | (log)->l_grant_write_bytes -= (bytes); \ | 500 | (log)->l_grant_write_bytes -= (bytes); \ |
| 503 | if ((log)->l_grant_write_bytes < 0) { \ | 501 | if ((log)->l_grant_write_bytes < 0) { \ |
| @@ -511,13 +509,9 @@ static inline void xlog_grant_sub_space(struct log *log, int bytes, int type) | |||
| 511 | (log)->l_grant_reserve_cycle--; \ | 509 | (log)->l_grant_reserve_cycle--; \ |
| 512 | } \ | 510 | } \ |
| 513 | } \ | 511 | } \ |
| 514 | } | 512 | } |
| 515 | 513 | #define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ | |
| 516 | #define XLOG_GRANT_ADD_SPACE(log,bytes,type) \ | 514 | { \ |
| 517 | xlog_grant_add_space(log,bytes,type) | ||
| 518 | static inline void | ||
| 519 | xlog_grant_add_space(struct log *log, int bytes, int type) | ||
| 520 | { | ||
| 521 | if (type == 'w') { \ | 515 | if (type == 'w') { \ |
| 522 | (log)->l_grant_write_bytes += (bytes); \ | 516 | (log)->l_grant_write_bytes += (bytes); \ |
| 523 | if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ | 517 | if ((log)->l_grant_write_bytes > (log)->l_logsize) { \ |
| @@ -531,12 +525,9 @@ xlog_grant_add_space(struct log *log, int bytes, int type) | |||
| 531 | (log)->l_grant_reserve_cycle++; \ | 525 | (log)->l_grant_reserve_cycle++; \ |
| 532 | } \ | 526 | } \ |
| 533 | } \ | 527 | } \ |
| 534 | } | 528 | } |
| 535 | 529 | #define XLOG_INS_TICKETQ(q, tic) \ | |
| 536 | #define XLOG_INS_TICKETQ(q, tic) xlog_ins_ticketq(q, tic) | 530 | { \ |
| 537 | static inline void | ||
| 538 | xlog_ins_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) | ||
| 539 | { \ | ||
| 540 | if (q) { \ | 531 | if (q) { \ |
| 541 | (tic)->t_next = (q); \ | 532 | (tic)->t_next = (q); \ |
| 542 | (tic)->t_prev = (q)->t_prev; \ | 533 | (tic)->t_prev = (q)->t_prev; \ |
| @@ -547,12 +538,9 @@ xlog_ins_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) | |||
| 547 | (q) = (tic); \ | 538 | (q) = (tic); \ |
| 548 | } \ | 539 | } \ |
| 549 | (tic)->t_flags |= XLOG_TIC_IN_Q; \ | 540 | (tic)->t_flags |= XLOG_TIC_IN_Q; \ |
| 550 | } | 541 | } |
| 551 | 542 | #define XLOG_DEL_TICKETQ(q, tic) \ | |
| 552 | #define XLOG_DEL_TICKETQ(q, tic) xlog_del_ticketq(q, tic) | 543 | { \ |
| 553 | static inline void | ||
| 554 | xlog_del_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) | ||
| 555 | { \ | ||
| 556 | if ((tic) == (tic)->t_next) { \ | 544 | if ((tic) == (tic)->t_next) { \ |
| 557 | (q) = NULL; \ | 545 | (q) = NULL; \ |
| 558 | } else { \ | 546 | } else { \ |
| @@ -562,7 +550,7 @@ xlog_del_ticketq(struct xlog_ticket *q, struct xlog_ticket *tic) | |||
| 562 | } \ | 550 | } \ |
| 563 | (tic)->t_next = (tic)->t_prev = NULL; \ | 551 | (tic)->t_next = (tic)->t_prev = NULL; \ |
| 564 | (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ | 552 | (tic)->t_flags &= ~XLOG_TIC_IN_Q; \ |
| 565 | } | 553 | } |
| 566 | 554 | ||
| 567 | /* common routines */ | 555 | /* common routines */ |
| 568 | extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); | 556 | extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp); |
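The xfs_log_priv.h change reverts a macro-to-inline-function conversion that was only half done: judging by the removed lines, the function bodies still ended every statement with the macros' trailing backslashes, and XLOG_INS_TICKETQ/XLOG_DEL_TICKETQ assign to their q argument, which a function taking struct xlog_ticket *q by value cannot propagate to the caller (that would need a struct xlog_ticket **). For the space accounting, a proper conversion might look like the sketch below; the write-side field names come from the hunk, the reserve-side names are assumptions:

static inline void xlog_grant_sub_space(struct log *log, int bytes, int type)
{
	int *grant = (type == 'w') ? &log->l_grant_write_bytes
				   : &log->l_grant_reserve_bytes;
	int *cycle = (type == 'w') ? &log->l_grant_write_cycle
				   : &log->l_grant_reserve_cycle;

	*grant -= bytes;
	if (*grant < 0) {	/* wrapped past the start of the log */
		*grant += log->l_logsize;
		(*cycle)--;
	}
}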
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c index 7c1f74531463..e03fa2a3d5ed 100644 --- a/fs/xfs/xfs_vnodeops.c +++ b/fs/xfs/xfs_vnodeops.c | |||
| @@ -3958,8 +3958,9 @@ xfs_finish_reclaim_all(xfs_mount_t *mp, int noblock) | |||
| 3958 | } | 3958 | } |
| 3959 | } | 3959 | } |
| 3960 | XFS_MOUNT_IUNLOCK(mp); | 3960 | XFS_MOUNT_IUNLOCK(mp); |
| 3961 | xfs_finish_reclaim(ip, noblock, | 3961 | if (xfs_finish_reclaim(ip, noblock, |
| 3962 | XFS_IFLUSH_DELWRI_ELSE_ASYNC); | 3962 | XFS_IFLUSH_DELWRI_ELSE_ASYNC)) |
| 3963 | delay(1); | ||
| 3963 | purged = 1; | 3964 | purged = 1; |
| 3964 | break; | 3965 | break; |
| 3965 | } | 3966 | } |
diff --git a/include/asm-frv/hardirq.h b/include/asm-frv/hardirq.h index 5248ca054909..685123981e8b 100644 --- a/include/asm-frv/hardirq.h +++ b/include/asm-frv/hardirq.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/config.h> | 15 | #include <linux/config.h> |
| 16 | #include <linux/threads.h> | 16 | #include <linux/threads.h> |
| 17 | #include <linux/irq.h> | ||
| 17 | 18 | ||
| 18 | typedef struct { | 19 | typedef struct { |
| 19 | unsigned int __softirq_pending; | 20 | unsigned int __softirq_pending; |
diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h index f9caecf7e3c0..ae031eaa3dd2 100644 --- a/include/asm-frv/ide.h +++ b/include/asm-frv/ide.h | |||
| @@ -33,10 +33,10 @@ | |||
| 33 | /* | 33 | /* |
| 34 | * some bits needed for parts of the IDE subsystem to compile | 34 | * some bits needed for parts of the IDE subsystem to compile |
| 35 | */ | 35 | */ |
| 36 | #define __ide_mm_insw(port, addr, n) insw(port, addr, n) | 36 | #define __ide_mm_insw(port, addr, n) insw((unsigned long) (port), addr, n) |
| 37 | #define __ide_mm_insl(port, addr, n) insl(port, addr, n) | 37 | #define __ide_mm_insl(port, addr, n) insl((unsigned long) (port), addr, n) |
| 38 | #define __ide_mm_outsw(port, addr, n) outsw(port, addr, n) | 38 | #define __ide_mm_outsw(port, addr, n) outsw((unsigned long) (port), addr, n) |
| 39 | #define __ide_mm_outsl(port, addr, n) outsl(port, addr, n) | 39 | #define __ide_mm_outsl(port, addr, n) outsl((unsigned long) (port), addr, n) |
| 40 | 40 | ||
| 41 | 41 | ||
| 42 | #endif /* __KERNEL__ */ | 42 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-frv/page.h b/include/asm-frv/page.h index 4feba567e7fd..b8221b611b5c 100644 --- a/include/asm-frv/page.h +++ b/include/asm-frv/page.h | |||
| @@ -47,8 +47,8 @@ typedef struct { unsigned long pgprot; } pgprot_t; | |||
| 47 | 47 | ||
| 48 | #define devmem_is_allowed(pfn) 1 | 48 | #define devmem_is_allowed(pfn) 1 |
| 49 | 49 | ||
| 50 | #define __pa(vaddr) virt_to_phys((void *) vaddr) | 50 | #define __pa(vaddr) virt_to_phys((void *) (unsigned long) (vaddr)) |
| 51 | #define __va(paddr) phys_to_virt((unsigned long) paddr) | 51 | #define __va(paddr) phys_to_virt((unsigned long) (paddr)) |
| 52 | 52 | ||
| 53 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) | 53 | #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) |
| 54 | 54 | ||
diff --git a/include/asm-frv/semaphore.h b/include/asm-frv/semaphore.h index b18396288df1..907c5c3643cc 100644 --- a/include/asm-frv/semaphore.h +++ b/include/asm-frv/semaphore.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #include <linux/spinlock.h> | 20 | #include <linux/spinlock.h> |
| 21 | #include <linux/rwsem.h> | 21 | #include <linux/rwsem.h> |
| 22 | 22 | ||
| 23 | #define SEMAPHORE_DEBUG WAITQUEUE_DEBUG | 23 | #define SEMAPHORE_DEBUG 0 |
| 24 | 24 | ||
| 25 | /* | 25 | /* |
| 26 | * the semaphore definition | 26 | * the semaphore definition |
diff --git a/include/asm-frv/thread_info.h b/include/asm-frv/thread_info.h index c8cba7836f0d..60f6b2aee76d 100644 --- a/include/asm-frv/thread_info.h +++ b/include/asm-frv/thread_info.h | |||
| @@ -58,7 +58,7 @@ struct thread_info { | |||
| 58 | 58 | ||
| 59 | #endif | 59 | #endif |
| 60 | 60 | ||
| 61 | #define PREEMPT_ACTIVE 0x4000000 | 61 | #define PREEMPT_ACTIVE 0x10000000 |
| 62 | 62 | ||
| 63 | /* | 63 | /* |
| 64 | * macros/functions for gaining access to the thread information structure | 64 | * macros/functions for gaining access to the thread information structure |
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h index bfff69a49936..ef1fb8ea4726 100644 --- a/include/asm-m32r/atomic.h +++ b/include/asm-m32r/atomic.h | |||
| @@ -242,6 +242,27 @@ static __inline__ int atomic_dec_return(atomic_t *v) | |||
| 242 | */ | 242 | */ |
| 243 | #define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0) | 243 | #define atomic_add_negative(i,v) (atomic_add_return((i), (v)) < 0) |
| 244 | 244 | ||
| 245 | #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) | ||
| 246 | |||
| 247 | /** | ||
| 248 | * atomic_add_unless - add unless the number is a given value | ||
| 249 | * @v: pointer of type atomic_t | ||
| 250 | * @a: the amount to add to v... | ||
| 251 | * @u: ...unless v is equal to u. | ||
| 252 | * | ||
| 253 | * Atomically adds @a to @v, so long as it was not @u. | ||
| 254 | * Returns non-zero if @v was not @u, and zero otherwise. | ||
| 255 | */ | ||
| 256 | #define atomic_add_unless(v, a, u) \ | ||
| 257 | ({ \ | ||
| 258 | int c, old; \ | ||
| 259 | c = atomic_read(v); \ | ||
| 260 | while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \ | ||
| 261 | c = old; \ | ||
| 262 | c != (u); \ | ||
| 263 | }) | ||
| 264 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
| 265 | |||
| 245 | static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) | 266 | static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *addr) |
| 246 | { | 267 | { |
| 247 | unsigned long flags; | 268 | unsigned long flags; |
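atomic_add_unless() is the classic compare-and-swap retry loop: snapshot the counter, then keep proposing old -> old + a until either the cmpxchg wins or the value hits u. A userspace rendition, with GCC's __sync builtin standing in for the m32r cmpxchg():

#include <stdio.h>

/* Returns nonzero iff the add happened (i.e. *v was not u). */
static int atomic_add_unless(int *v, int a, int u)
{
	int c = *v, old;

	while (c != u &&
	       (old = __sync_val_compare_and_swap(v, c, c + a)) != c)
		c = old;	/* lost a race: retry with the fresh value */
	return c != u;
}

int main(void)
{
	int v = 5;
	int r = atomic_add_unless(&v, 1, 0);

	printf("%d v=%d\n", r, v);	/* 1 v=6 */
	v = 0;
	r = atomic_add_unless(&v, 1, 0);
	printf("%d v=%d\n", r, v);	/* 0 v=0: add refused */
	return 0;
}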
diff --git a/include/asm-m32r/ide.h b/include/asm-m32r/ide.h index 194393bd8beb..f7aa96970d18 100644 --- a/include/asm-m32r/ide.h +++ b/include/asm-m32r/ide.h | |||
| @@ -25,18 +25,21 @@ | |||
| 25 | # endif | 25 | # endif |
| 26 | #endif | 26 | #endif |
| 27 | 27 | ||
| 28 | #if defined(CONFIG_PLAT_M32700UT) | 28 | #include <asm/m32r.h> |
| 29 | #include <asm/irq.h> | 29 | |
| 30 | #include <asm/m32700ut/m32700ut_pld.h> | ||
| 31 | #endif | ||
| 32 | 30 | ||
| 33 | #define IDE_ARCH_OBSOLETE_DEFAULTS | 31 | #define IDE_ARCH_OBSOLETE_DEFAULTS |
| 34 | 32 | ||
| 35 | static __inline__ int ide_default_irq(unsigned long base) | 33 | static __inline__ int ide_default_irq(unsigned long base) |
| 36 | { | 34 | { |
| 37 | switch (base) { | 35 | switch (base) { |
| 38 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) || defined(CONFIG_PLAT_MAPPI3) | 36 | #if defined(CONFIG_PLAT_M32700UT) || defined(CONFIG_PLAT_MAPPI2) |
| 37 | case 0x1f0: return PLD_IRQ_CFIREQ; | ||
| 38 | default: | ||
| 39 | return 0; | ||
| 40 | #elif defined(CONFIG_PLAT_MAPPI3) | ||
| 39 | case 0x1f0: return PLD_IRQ_CFIREQ; | 41 | case 0x1f0: return PLD_IRQ_CFIREQ; |
| 42 | case 0x170: return PLD_IRQ_IDEIREQ; | ||
| 40 | default: | 43 | default: |
| 41 | return 0; | 44 | return 0; |
| 42 | #else | 45 | #else |
diff --git a/include/asm-m32r/mappi3/mappi3_pld.h b/include/asm-m32r/mappi3/mappi3_pld.h index 3f1551f7f01f..1d3c25d61bcb 100644 --- a/include/asm-m32r/mappi3/mappi3_pld.h +++ b/include/asm-m32r/mappi3/mappi3_pld.h | |||
| @@ -59,7 +59,7 @@ | |||
| 59 | #define M32R_IRQ_I2C (28) /* I2C-BUS */ | 59 | #define M32R_IRQ_I2C (28) /* I2C-BUS */ |
| 60 | #define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */ | 60 | #define PLD_IRQ_CFIREQ (6) /* INT5 CFC Card Interrupt */ |
| 61 | #define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert */ | 61 | #define PLD_IRQ_CFC_INSERT (7) /* INT6 CFC Card Insert */ |
| 62 | #define PLD_IRQ_CFC_EJECT (8) /* INT7 CFC Card Eject */ | 62 | #define PLD_IRQ_IDEIREQ (8) /* INT7 IDE Interrupt */ |
| 63 | #define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */ | 63 | #define PLD_IRQ_MMCCARD (43) /* MMC Card Insert */ |
| 64 | #define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */ | 64 | #define PLD_IRQ_MMCIRQ (44) /* MMC Transfer Done */ |
| 65 | 65 | ||
diff --git a/include/asm-m32r/system.h b/include/asm-m32r/system.h index 73348c3f858b..5eee832b73a0 100644 --- a/include/asm-m32r/system.h +++ b/include/asm-m32r/system.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | */ | 11 | */ |
| 12 | 12 | ||
| 13 | #include <linux/config.h> | 13 | #include <linux/config.h> |
| 14 | #include <asm/assembler.h> | ||
| 14 | 15 | ||
| 15 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
| 16 | 17 | ||
| @@ -132,8 +133,6 @@ static inline void local_irq_disable(void) | |||
| 132 | !(flags & 0x40); \ | 133 | !(flags & 0x40); \ |
| 133 | }) | 134 | }) |
| 134 | 135 | ||
| 135 | #endif /* __KERNEL__ */ | ||
| 136 | |||
| 137 | #define nop() __asm__ __volatile__ ("nop" : : ) | 136 | #define nop() __asm__ __volatile__ ("nop" : : ) |
| 138 | 137 | ||
| 139 | #define xchg(ptr,x) \ | 138 | #define xchg(ptr,x) \ |
| @@ -213,6 +212,67 @@ static __inline__ unsigned long __xchg(unsigned long x, volatile void * ptr, | |||
| 213 | return (tmp); | 212 | return (tmp); |
| 214 | } | 213 | } |
| 215 | 214 | ||
| 215 | #define __HAVE_ARCH_CMPXCHG 1 | ||
| 216 | |||
| 217 | static __inline__ unsigned long | ||
| 218 | __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new) | ||
| 219 | { | ||
| 220 | unsigned long flags; | ||
| 221 | unsigned int retval; | ||
| 222 | |||
| 223 | local_irq_save(flags); | ||
| 224 | __asm__ __volatile__ ( | ||
| 225 | DCACHE_CLEAR("%0", "r4", "%1") | ||
| 226 | M32R_LOCK" %0, @%1; \n" | ||
| 227 | " bne %0, %2, 1f; \n" | ||
| 228 | M32R_UNLOCK" %3, @%1; \n" | ||
| 229 | " bra 2f; \n" | ||
| 230 | " .fillinsn \n" | ||
| 231 | "1:" | ||
| 232 | M32R_UNLOCK" %2, @%1; \n" | ||
| 233 | " .fillinsn \n" | ||
| 234 | "2:" | ||
| 235 | : "=&r" (retval) | ||
| 236 | : "r" (p), "r" (old), "r" (new) | ||
| 237 | : "cbit", "memory" | ||
| 238 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
| 239 | , "r4" | ||
| 240 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
| 241 | ); | ||
| 242 | local_irq_restore(flags); | ||
| 243 | |||
| 244 | return retval; | ||
| 245 | } | ||
| 246 | |||
| 247 | /* This function doesn't exist, so you'll get a linker error | ||
| 248 | if something tries to do an invalid cmpxchg(). */ | ||
| 249 | extern void __cmpxchg_called_with_bad_pointer(void); | ||
| 250 | |||
| 251 | static __inline__ unsigned long | ||
| 252 | __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size) | ||
| 253 | { | ||
| 254 | switch (size) { | ||
| 255 | case 4: | ||
| 256 | return __cmpxchg_u32(ptr, old, new); | ||
| 257 | #if 0 /* we don't have __cmpxchg_u64 */ | ||
| 258 | case 8: | ||
| 259 | return __cmpxchg_u64(ptr, old, new); | ||
| 260 | #endif /* 0 */ | ||
| 261 | } | ||
| 262 | __cmpxchg_called_with_bad_pointer(); | ||
| 263 | return old; | ||
| 264 | } | ||
| 265 | |||
| 266 | #define cmpxchg(ptr,o,n) \ | ||
| 267 | ({ \ | ||
| 268 | __typeof__(*(ptr)) _o_ = (o); \ | ||
| 269 | __typeof__(*(ptr)) _n_ = (n); \ | ||
| 270 | (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ | ||
| 271 | (unsigned long)_n_, sizeof(*(ptr))); \ | ||
| 272 | }) | ||
| 273 | |||
| 274 | #endif /* __KERNEL__ */ | ||
| 275 | |||
| 216 | /* | 276 | /* |
| 217 | * Memory barrier. | 277 | * Memory barrier. |
| 218 | * | 278 | * |
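Note the trick behind __cmpxchg_called_with_bad_pointer() in the hunk above: it is declared but defined nowhere, so a cmpxchg() on an operand size the architecture cannot handle compiles cleanly and then fails at link time with a self-describing symbol name. The mechanism in isolation, with the u32 case implemented via a GCC builtin:

#include <stdint.h>

extern void cmpxchg_called_with_bad_size(void);	/* never defined */

static inline unsigned long
__my_cmpxchg(volatile void *ptr, unsigned long o, unsigned long n, int size)
{
	switch (size) {
	case 4:
		return __sync_val_compare_and_swap(
			(volatile uint32_t *)ptr, (uint32_t)o, (uint32_t)n);
	}
	/* Reachable (and therefore emitted) only for unsupported sizes,
	 * so the undefined symbol surfaces exactly when misused. */
	cmpxchg_called_with_bad_size();
	return o;
}

#define my_cmpxchg(ptr, o, n)						\
	((__typeof__(*(ptr)))__my_cmpxchg((ptr), (unsigned long)(o),	\
					  (unsigned long)(n), sizeof(*(ptr))))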
diff --git a/include/asm-powerpc/iommu.h b/include/asm-powerpc/iommu.h index 6a35e6570ccd..f89f06050893 100644 --- a/include/asm-powerpc/iommu.h +++ b/include/asm-powerpc/iommu.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation |
| 3 | * Rewrite, cleanup: | 3 | * Rewrite, cleanup: |
| 4 | * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation | 4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h index 58a3dd9a79ec..6642c0125001 100644 --- a/include/asm-powerpc/page_64.h +++ b/include/asm-powerpc/page_64.h | |||
| @@ -103,8 +103,9 @@ extern unsigned int HPAGE_SHIFT; | |||
| 103 | #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) | 103 | #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT) |
| 104 | #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) | 104 | #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT) |
| 105 | 105 | ||
| 106 | #define LOW_ESID_MASK(addr, len) (((1U << (GET_ESID(addr+len-1)+1)) \ | 106 | #define LOW_ESID_MASK(addr, len) \ |
| 107 | - (1U << GET_ESID(addr))) & 0xffff) | 107 | (((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \ |
| 108 | - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff) | ||
| 108 | #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ | 109 | #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \ |
| 109 | - (1U << GET_HTLB_AREA(addr))) & 0xffff) | 110 | - (1U << GET_HTLB_AREA(addr))) & 0xffff) |
| 110 | 111 | ||
| @@ -113,17 +114,21 @@ extern unsigned int HPAGE_SHIFT; | |||
| 113 | #define ARCH_HAS_SETCLEAR_HUGE_PTE | 114 | #define ARCH_HAS_SETCLEAR_HUGE_PTE |
| 114 | 115 | ||
| 115 | #define touches_hugepage_low_range(mm, addr, len) \ | 116 | #define touches_hugepage_low_range(mm, addr, len) \ |
| 116 | (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas) | 117 | (((addr) < 0x100000000UL) \ |
| 118 | && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas)) | ||
| 117 | #define touches_hugepage_high_range(mm, addr, len) \ | 119 | #define touches_hugepage_high_range(mm, addr, len) \ |
| 118 | (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas) | 120 | ((((addr) + (len)) > 0x100000000UL) \ |
| 121 | && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas)) | ||
| 119 | 122 | ||
| 120 | #define __within_hugepage_low_range(addr, len, segmask) \ | 123 | #define __within_hugepage_low_range(addr, len, segmask) \ |
| 121 | ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)) | 124 | ( (((addr)+(len)) <= 0x100000000UL) \ |
| 125 | && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))) | ||
| 122 | #define within_hugepage_low_range(addr, len) \ | 126 | #define within_hugepage_low_range(addr, len) \ |
| 123 | __within_hugepage_low_range((addr), (len), \ | 127 | __within_hugepage_low_range((addr), (len), \ |
| 124 | current->mm->context.low_htlb_areas) | 128 | current->mm->context.low_htlb_areas) |
| 125 | #define __within_hugepage_high_range(addr, len, zonemask) \ | 129 | #define __within_hugepage_high_range(addr, len, zonemask) \ |
| 126 | ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)) | 130 | ( ((addr) >= 0x100000000UL) \ |
| 131 | && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask))) | ||
| 127 | #define within_hugepage_high_range(addr, len) \ | 132 | #define within_hugepage_high_range(addr, len) \ |
| 128 | __within_hugepage_high_range((addr), (len), \ | 133 | __within_hugepage_high_range((addr), (len), \ |
| 129 | current->mm->context.high_htlb_areas) | 134 | current->mm->context.high_htlb_areas) |
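The page_64.h hunk closes an overflow: for a range ending at or above 4GB, GET_ESID(addr+len-1) yields a segment index past the low 16, and 1U << (index+1) then shifts by at least the width of the mask, which is undefined. Clamping the endpoints to the 4GB boundary, plus the new explicit does-the-range-intersect tests, keeps every shift in bounds. A simplified model of the low-range mask; the 256MB segment size (SID_SHIFT of 28) and the 64-bit unsigned long are assumptions matching ppc64:

#include <stdio.h>

#define GET_ESID(a)	((unsigned)((a) >> 28))	/* 256MB segments */
#define FOUR_GB		0x100000000UL

static unsigned low_esid_mask(unsigned long addr, unsigned long len)
{
	unsigned long end = addr + len - 1;

	if (addr >= FOUR_GB)
		return 0;		/* entirely above the low area */
	if (end >= FOUR_GB)
		end = FOUR_GB - 1;	/* clamp: GET_ESID(end) <= 15 */
	return ((1U << (GET_ESID(end) + 1)) - (1U << GET_ESID(addr)))
		& 0xffff;
}

int main(void)
{
	/* A 4GB-crossing range now selects segment 15, not garbage. */
	printf("%#x\n", low_esid_mask(0xf0000000UL, 0x20000000UL));
	return 0;	/* prints 0x8000 */
}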
diff --git a/include/asm-powerpc/tce.h b/include/asm-powerpc/tce.h index d099d5200f9b..980a094fd5a7 100644 --- a/include/asm-powerpc/tce.h +++ b/include/asm-powerpc/tce.h | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | 2 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation |
| 3 | * Rewrite, cleanup: | 3 | * Rewrite, cleanup: |
| 4 | * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation | 4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
| 5 | * | 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU General Public License as published by |
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 9a02879b235d..f0a9b44d3eb5 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h | |||
| @@ -348,16 +348,6 @@ extern unsigned long find_ecache_flush_span(unsigned long size); | |||
| 348 | struct vm_area_struct; | 348 | struct vm_area_struct; |
| 349 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); | 349 | extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); |
| 350 | 350 | ||
| 351 | /* Make a non-present pseudo-TTE. */ | ||
| 352 | static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) | ||
| 353 | { | ||
| 354 | pte_t pte; | ||
| 355 | pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E) & | ||
| 356 | ~(unsigned long)_PAGE_CACHE); | ||
| 357 | pte_val(pte) |= (((unsigned long)space) << 32); | ||
| 358 | return pte; | ||
| 359 | } | ||
| 360 | |||
| 361 | /* Encode and de-code a swap entry */ | 351 | /* Encode and de-code a swap entry */ |
| 362 | #define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL) | 352 | #define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL) |
| 363 | #define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL)) | 353 | #define __swp_offset(entry) ((entry).val >> (PAGE_SHIFT + 8UL)) |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 43c44530ef9d..0ed1d4853c69 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -65,10 +65,9 @@ extern struct sysdev_class cpu_sysdev_class; | |||
| 65 | 65 | ||
| 66 | #ifdef CONFIG_HOTPLUG_CPU | 66 | #ifdef CONFIG_HOTPLUG_CPU |
| 67 | /* Stop CPUs going up and down. */ | 67 | /* Stop CPUs going up and down. */ |
| 68 | extern struct semaphore cpucontrol; | 68 | extern void lock_cpu_hotplug(void); |
| 69 | #define lock_cpu_hotplug() down(&cpucontrol) | 69 | extern void unlock_cpu_hotplug(void); |
| 70 | #define unlock_cpu_hotplug() up(&cpucontrol) | 70 | extern int lock_cpu_hotplug_interruptible(void); |
| 71 | #define lock_cpu_hotplug_interruptible() down_interruptible(&cpucontrol) | ||
| 72 | #define hotcpu_notifier(fn, pri) { \ | 71 | #define hotcpu_notifier(fn, pri) { \ |
| 73 | static struct notifier_block fn##_nb = \ | 72 | static struct notifier_block fn##_nb = \ |
| 74 | { .notifier_call = fn, .priority = pri }; \ | 73 | { .notifier_call = fn, .priority = pri }; \ |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 9a424383e6c6..dc4081b6f161 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -85,7 +85,6 @@ struct notifier_block; | |||
| 85 | extern int register_memory_notifier(struct notifier_block *nb); | 85 | extern int register_memory_notifier(struct notifier_block *nb); |
| 86 | extern void unregister_memory_notifier(struct notifier_block *nb); | 86 | extern void unregister_memory_notifier(struct notifier_block *nb); |
| 87 | 87 | ||
| 88 | extern struct sysdev_class memory_sysdev_class; | ||
| 89 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 88 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
| 90 | 89 | ||
| 91 | #define hotplug_memory_notifier(fn, pri) { \ | 90 | #define hotplug_memory_notifier(fn, pri) { \ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index f0cdfd18db55..6a75a7a78bf1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -145,7 +145,7 @@ extern unsigned int kobjsize(const void *objp); | |||
| 145 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ | 145 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ |
| 146 | #define VM_GROWSUP 0x00000200 | 146 | #define VM_GROWSUP 0x00000200 |
| 147 | #define VM_SHM 0x00000000 /* Means nothing: delete it later */ | 147 | #define VM_SHM 0x00000000 /* Means nothing: delete it later */ |
| 148 | #define VM_UNPAGED 0x00000400 /* Pages managed without map count */ | 148 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ |
| 149 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ | 149 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ |
| 150 | 150 | ||
| 151 | #define VM_EXECUTABLE 0x00001000 | 151 | #define VM_EXECUTABLE 0x00001000 |
| @@ -664,6 +664,7 @@ struct zap_details { | |||
| 664 | unsigned long truncate_count; /* Compare vm_truncate_count */ | 664 | unsigned long truncate_count; /* Compare vm_truncate_count */ |
| 665 | }; | 665 | }; |
| 666 | 666 | ||
| 667 | struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t); | ||
| 667 | unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, | 668 | unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, |
| 668 | unsigned long size, struct zap_details *); | 669 | unsigned long size, struct zap_details *); |
| 669 | unsigned long unmap_vmas(struct mmu_gather **tlb, | 670 | unsigned long unmap_vmas(struct mmu_gather **tlb, |
| @@ -953,7 +954,7 @@ unsigned long vmalloc_to_pfn(void *addr); | |||
| 953 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, | 954 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, |
| 954 | unsigned long pfn, unsigned long size, pgprot_t); | 955 | unsigned long pfn, unsigned long size, pgprot_t); |
| 955 | 956 | ||
| 956 | struct page *follow_page(struct mm_struct *, unsigned long address, | 957 | struct page *follow_page(struct vm_area_struct *, unsigned long address, |
| 957 | unsigned int foll_flags); | 958 | unsigned int foll_flags); |
| 958 | #define FOLL_WRITE 0x01 /* check pte is writable */ | 959 | #define FOLL_WRITE 0x01 /* check pte is writable */ |
| 959 | #define FOLL_TOUCH 0x02 /* mark page accessed */ | 960 | #define FOLL_TOUCH 0x02 /* mark page accessed */ |
diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 35b30e6c8cf8..33261f1d2239 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h | |||
| @@ -89,7 +89,7 @@ static inline void page_dup_rmap(struct page *page) | |||
| 89 | /* | 89 | /* |
| 90 | * Called from mm/vmscan.c to handle paging out | 90 | * Called from mm/vmscan.c to handle paging out |
| 91 | */ | 91 | */ |
| 92 | int page_referenced(struct page *, int is_locked, int ignore_token); | 92 | int page_referenced(struct page *, int is_locked); |
| 93 | int try_to_unmap(struct page *); | 93 | int try_to_unmap(struct page *); |
| 94 | 94 | ||
| 95 | /* | 95 | /* |
| @@ -109,7 +109,7 @@ unsigned long page_address_in_vma(struct page *, struct vm_area_struct *); | |||
| 109 | #define anon_vma_prepare(vma) (0) | 109 | #define anon_vma_prepare(vma) (0) |
| 110 | #define anon_vma_link(vma) do {} while (0) | 110 | #define anon_vma_link(vma) do {} while (0) |
| 111 | 111 | ||
| 112 | #define page_referenced(page,l,i) TestClearPageReferenced(page) | 112 | #define page_referenced(page,l) TestClearPageReferenced(page) |
| 113 | #define try_to_unmap(page) SWAP_FAIL | 113 | #define try_to_unmap(page) SWAP_FAIL |
| 114 | 114 | ||
| 115 | #endif /* CONFIG_MMU */ | 115 | #endif /* CONFIG_MMU */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 2038bd27b041..b0ad6f30679e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -908,7 +908,6 @@ do { if (atomic_dec_and_test(&(tsk)->usage)) __put_task_struct(tsk); } while(0) | |||
| 908 | #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ | 908 | #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */ |
| 909 | #define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ | 909 | #define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */ |
| 910 | #define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ | 910 | #define PF_RANDOMIZE 0x00800000 /* randomize virtual address space */ |
| 911 | #define PF_HOTPLUG_CPU 0x01000000 /* Currently performing CPU hotplug */ | ||
| 912 | 911 | ||
| 913 | /* | 912 | /* |
| 914 | * Only the _current_ task can read/write to tsk->flags, but other | 913 | * Only the _current_ task can read/write to tsk->flags, but other |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 20c975642cab..508668f840b6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -239,6 +239,11 @@ static inline void put_swap_token(struct mm_struct *mm) | |||
| 239 | __put_swap_token(mm); | 239 | __put_swap_token(mm); |
| 240 | } | 240 | } |
| 241 | 241 | ||
| 242 | static inline void disable_swap_token(void) | ||
| 243 | { | ||
| 244 | put_swap_token(swap_token_mm); | ||
| 245 | } | ||
| 246 | |||
| 242 | #else /* CONFIG_SWAP */ | 247 | #else /* CONFIG_SWAP */ |
| 243 | 248 | ||
| 244 | #define total_swap_pages 0 | 249 | #define total_swap_pages 0 |
| @@ -283,6 +288,7 @@ static inline swp_entry_t get_swap_page(void) | |||
| 283 | #define put_swap_token(x) do { } while(0) | 288 | #define put_swap_token(x) do { } while(0) |
| 284 | #define grab_swap_token() do { } while(0) | 289 | #define grab_swap_token() do { } while(0) |
| 285 | #define has_swap_token(x) 0 | 290 | #define has_swap_token(x) 0 |
| 291 | #define disable_swap_token() do { } while(0) | ||
| 286 | 292 | ||
| 287 | #endif /* CONFIG_SWAP */ | 293 | #endif /* CONFIG_SWAP */ |
| 288 | #endif /* __KERNEL__*/ | 294 | #endif /* __KERNEL__*/ |
diff --git a/kernel/cpu.c b/kernel/cpu.c index d61ba88f34e5..e882c6babf41 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
| @@ -16,47 +16,76 @@ | |||
| 16 | #include <asm/semaphore.h> | 16 | #include <asm/semaphore.h> |
| 17 | 17 | ||
| 18 | /* This protects CPUs going up and down... */ | 18 | /* This protects CPUs going up and down... */ |
| 19 | DECLARE_MUTEX(cpucontrol); | 19 | static DECLARE_MUTEX(cpucontrol); |
| 20 | EXPORT_SYMBOL_GPL(cpucontrol); | ||
| 21 | 20 | ||
| 22 | static struct notifier_block *cpu_chain; | 21 | static struct notifier_block *cpu_chain; |
| 23 | 22 | ||
| 24 | /* | 23 | #ifdef CONFIG_HOTPLUG_CPU |
| 25 | * Used to check by callers if they need to acquire the cpucontrol | 24 | static struct task_struct *lock_cpu_hotplug_owner; |
| 26 | * or not to protect a cpu from being removed. Its sometimes required to | 25 | static int lock_cpu_hotplug_depth; |
| 27 | * call these functions both for normal operations, and in response to | ||
| 28 | * a cpu being added/removed. If the context of the call is in the same | ||
| 29 | * thread context as a CPU hotplug thread, we dont need to take the lock | ||
| 30 | * since its already protected | ||
| 31 | * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj | ||
| 32 | */ | ||
| 33 | 26 | ||
| 34 | int current_in_cpu_hotplug(void) | 27 | static int __lock_cpu_hotplug(int interruptible) |
| 35 | { | 28 | { |
| 36 | return (current->flags & PF_HOTPLUG_CPU); | 29 | int ret = 0; |
| 30 | |||
| 31 | if (lock_cpu_hotplug_owner != current) { | ||
| 32 | if (interruptible) | ||
| 33 | ret = down_interruptible(&cpucontrol); | ||
| 34 | else | ||
| 35 | down(&cpucontrol); | ||
| 36 | } | ||
| 37 | |||
| 38 | /* | ||
| 39 | * Set only if we succeed in locking | ||
| 40 | */ | ||
| 41 | if (!ret) { | ||
| 42 | lock_cpu_hotplug_depth++; | ||
| 43 | lock_cpu_hotplug_owner = current; | ||
| 44 | } | ||
| 45 | |||
| 46 | return ret; | ||
| 37 | } | 47 | } |
| 38 | 48 | ||
| 39 | EXPORT_SYMBOL_GPL(current_in_cpu_hotplug); | 49 | void lock_cpu_hotplug(void) |
| 50 | { | ||
| 51 | __lock_cpu_hotplug(0); | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL_GPL(lock_cpu_hotplug); | ||
| 40 | 54 | ||
| 55 | void unlock_cpu_hotplug(void) | ||
| 56 | { | ||
| 57 | if (--lock_cpu_hotplug_depth == 0) { | ||
| 58 | lock_cpu_hotplug_owner = NULL; | ||
| 59 | up(&cpucontrol); | ||
| 60 | } | ||
| 61 | } | ||
| 62 | EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); | ||
| 63 | |||
| 64 | int lock_cpu_hotplug_interruptible(void) | ||
| 65 | { | ||
| 66 | return __lock_cpu_hotplug(1); | ||
| 67 | } | ||
| 68 | EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible); | ||
| 69 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
| 41 | 70 | ||
| 42 | /* Need to know about CPUs going up/down? */ | 71 | /* Need to know about CPUs going up/down? */ |
| 43 | int register_cpu_notifier(struct notifier_block *nb) | 72 | int register_cpu_notifier(struct notifier_block *nb) |
| 44 | { | 73 | { |
| 45 | int ret; | 74 | int ret; |
| 46 | 75 | ||
| 47 | if ((ret = down_interruptible(&cpucontrol)) != 0) | 76 | if ((ret = lock_cpu_hotplug_interruptible()) != 0) |
| 48 | return ret; | 77 | return ret; |
| 49 | ret = notifier_chain_register(&cpu_chain, nb); | 78 | ret = notifier_chain_register(&cpu_chain, nb); |
| 50 | up(&cpucontrol); | 79 | unlock_cpu_hotplug(); |
| 51 | return ret; | 80 | return ret; |
| 52 | } | 81 | } |
| 53 | EXPORT_SYMBOL(register_cpu_notifier); | 82 | EXPORT_SYMBOL(register_cpu_notifier); |
| 54 | 83 | ||
| 55 | void unregister_cpu_notifier(struct notifier_block *nb) | 84 | void unregister_cpu_notifier(struct notifier_block *nb) |
| 56 | { | 85 | { |
| 57 | down(&cpucontrol); | 86 | lock_cpu_hotplug(); |
| 58 | notifier_chain_unregister(&cpu_chain, nb); | 87 | notifier_chain_unregister(&cpu_chain, nb); |
| 59 | up(&cpucontrol); | 88 | unlock_cpu_hotplug(); |
| 60 | } | 89 | } |
| 61 | EXPORT_SYMBOL(unregister_cpu_notifier); | 90 | EXPORT_SYMBOL(unregister_cpu_notifier); |
| 62 | 91 | ||
| @@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu) | |||
| 112 | goto out; | 141 | goto out; |
| 113 | } | 142 | } |
| 114 | 143 | ||
| 115 | /* | ||
| 116 | * Leave a trace in current->flags indicating we are already in | ||
| 117 | * process of performing CPU hotplug. Callers can check if cpucontrol | ||
| 118 | * is already acquired by current thread, and if so not cause | ||
| 119 | * a dead lock by not acquiring the lock | ||
| 120 | */ | ||
| 121 | current->flags |= PF_HOTPLUG_CPU; | ||
| 122 | err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, | 144 | err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, |
| 123 | (void *)(long)cpu); | 145 | (void *)(long)cpu); |
| 124 | if (err == NOTIFY_BAD) { | 146 | if (err == NOTIFY_BAD) { |
| @@ -171,7 +193,6 @@ out_thread: | |||
| 171 | out_allowed: | 193 | out_allowed: |
| 172 | set_cpus_allowed(current, old_allowed); | 194 | set_cpus_allowed(current, old_allowed); |
| 173 | out: | 195 | out: |
| 174 | current->flags &= ~PF_HOTPLUG_CPU; | ||
| 175 | unlock_cpu_hotplug(); | 196 | unlock_cpu_hotplug(); |
| 176 | return err; | 197 | return err; |
| 177 | } | 198 | } |
| @@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu) | |||
| 182 | int ret; | 203 | int ret; |
| 183 | void *hcpu = (void *)(long)cpu; | 204 | void *hcpu = (void *)(long)cpu; |
| 184 | 205 | ||
| 185 | if ((ret = down_interruptible(&cpucontrol)) != 0) | 206 | if ((ret = lock_cpu_hotplug_interruptible()) != 0) |
| 186 | return ret; | 207 | return ret; |
| 187 | 208 | ||
| 188 | if (cpu_online(cpu) || !cpu_present(cpu)) { | 209 | if (cpu_online(cpu) || !cpu_present(cpu)) { |
| @@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu) | |||
| 190 | goto out; | 211 | goto out; |
| 191 | } | 212 | } |
| 192 | 213 | ||
| 193 | /* | ||
| 194 | * Leave a trace in current->flags indicating we are already in | ||
| 195 | * process of performing CPU hotplug. | ||
| 196 | */ | ||
| 197 | current->flags |= PF_HOTPLUG_CPU; | ||
| 198 | ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); | 214 | ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); |
| 199 | if (ret == NOTIFY_BAD) { | 215 | if (ret == NOTIFY_BAD) { |
| 200 | printk("%s: attempt to bring up CPU %u failed\n", | 216 | printk("%s: attempt to bring up CPU %u failed\n", |
| @@ -217,7 +233,6 @@ out_notify: | |||
| 217 | if (ret != 0) | 233 | if (ret != 0) |
| 218 | notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); | 234 | notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu); |
| 219 | out: | 235 | out: |
| 220 | current->flags &= ~PF_HOTPLUG_CPU; | 236 | unlock_cpu_hotplug(); |
| 221 | up(&cpucontrol); | ||
| 222 | return ret; | 237 | return ret; |
| 223 | } | 238 | } |
diff --git a/kernel/fork.c b/kernel/fork.c index 1c1cf8dc396b..fb8572a42297 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags, | |||
| 1124 | if (unlikely(p->ptrace & PT_PTRACED)) | 1124 | if (unlikely(p->ptrace & PT_PTRACED)) |
| 1125 | __ptrace_link(p, current->parent); | 1125 | __ptrace_link(p, current->parent); |
| 1126 | 1126 | ||
| 1127 | cpuset_fork(p); | ||
| 1128 | |||
| 1129 | attach_pid(p, PIDTYPE_PID, p->pid); | 1127 | attach_pid(p, PIDTYPE_PID, p->pid); |
| 1130 | attach_pid(p, PIDTYPE_TGID, p->tgid); | 1128 | attach_pid(p, PIDTYPE_TGID, p->tgid); |
| 1131 | if (thread_group_leader(p)) { | 1129 | if (thread_group_leader(p)) { |
| @@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags, | |||
| 1135 | __get_cpu_var(process_counts)++; | 1133 | __get_cpu_var(process_counts)++; |
| 1136 | } | 1134 | } |
| 1137 | 1135 | ||
| 1138 | proc_fork_connector(p); | ||
| 1139 | if (!current->signal->tty && p->signal->tty) | 1136 | if (!current->signal->tty && p->signal->tty) |
| 1140 | p->signal->tty = NULL; | 1137 | p->signal->tty = NULL; |
| 1141 | 1138 | ||
| 1142 | nr_threads++; | 1139 | nr_threads++; |
| 1143 | total_forks++; | 1140 | total_forks++; |
| 1144 | write_unlock_irq(&tasklist_lock); | 1141 | write_unlock_irq(&tasklist_lock); |
| 1142 | proc_fork_connector(p); | ||
| 1143 | cpuset_fork(p); | ||
| 1145 | retval = 0; | 1144 | retval = 0; |
| 1146 | 1145 | ||
| 1147 | fork_out: | 1146 | fork_out: |
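Side note on the fork.c hunk: proc_fork_connector() and cpuset_fork() now run after write_unlock_irq(&tasklist_lock) rather than inside the locked region. Neither call needs tasklist_lock, and moving them out keeps whatever allocation or locking they do (the connector path builds and sends a netlink message) away from a write-held, irq-disabled spinlock section.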
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 84af54c39e1b..cae4f5728997 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp) | |||
| 36 | union cpu_time_count ret; | 36 | union cpu_time_count ret; |
| 37 | ret.sched = 0; /* high half always zero when .cpu used */ | 37 | ret.sched = 0; /* high half always zero when .cpu used */ |
| 38 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { | 38 | if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) { |
| 39 | ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec; | 39 | ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec; |
| 40 | } else { | 40 | } else { |
| 41 | ret.cpu = timespec_to_cputime(tp); | 41 | ret.cpu = timespec_to_cputime(tp); |
| 42 | } | 42 | } |
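The one-liner above is a 32-bit overflow fix: tv_sec is a long, so on 32-bit targets tv_sec * NSEC_PER_SEC is computed in 32 bits and wraps for any timeout past roughly 2.1 seconds (2^31 ns). A standalone illustration of why the widening cast must come before the multiply:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000

int main(void)
{
        uint32_t sec = 5;               /* stands in for a 32-bit tv_sec */

        /* Multiply first, widen later: the product wraps mod 2^32. */
        uint64_t wrong = (uint64_t)(sec * (uint32_t)NSEC_PER_SEC);
        /* Widen first, as the patch does: the product survives. */
        uint64_t right = (uint64_t)sec * NSEC_PER_SEC;

        /* Prints 705032704 vs 5000000000. */
        printf("%llu vs %llu\n", (unsigned long long)wrong,
                                 (unsigned long long)right);
        return 0;
}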
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 42df83d7fad2..2bd5aee1c736 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) | |||
| 102 | 102 | ||
| 103 | if (!test_and_set_bit(0, &work->pending)) { | 103 | if (!test_and_set_bit(0, &work->pending)) { |
| 104 | if (unlikely(is_single_threaded(wq))) | 104 | if (unlikely(is_single_threaded(wq))) |
| 105 | cpu = 0; | 105 | cpu = any_online_cpu(cpu_online_map); |
| 106 | BUG_ON(!list_empty(&work->entry)); | 106 | BUG_ON(!list_empty(&work->entry)); |
| 107 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 107 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
| 108 | ret = 1; | 108 | ret = 1; |
| @@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data) | |||
| 118 | int cpu = smp_processor_id(); | 118 | int cpu = smp_processor_id(); |
| 119 | 119 | ||
| 120 | if (unlikely(is_single_threaded(wq))) | 120 | if (unlikely(is_single_threaded(wq))) |
| 121 | cpu = 0; | 121 | cpu = any_online_cpu(cpu_online_map); |
| 122 | 122 | ||
| 123 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); | 123 | __queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work); |
| 124 | } | 124 | } |
| @@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq) | |||
| 266 | might_sleep(); | 266 | might_sleep(); |
| 267 | 267 | ||
| 268 | if (is_single_threaded(wq)) { | 268 | if (is_single_threaded(wq)) { |
| 269 | /* Always use cpu 0's area. */ | 269 | /* Always use first cpu's area. */ |
| 270 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0)); | 270 | flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map))); |
| 271 | } else { | 271 | } else { |
| 272 | int cpu; | 272 | int cpu; |
| 273 | 273 | ||
| @@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name, | |||
| 320 | lock_cpu_hotplug(); | 320 | lock_cpu_hotplug(); |
| 321 | if (singlethread) { | 321 | if (singlethread) { |
| 322 | INIT_LIST_HEAD(&wq->list); | 322 | INIT_LIST_HEAD(&wq->list); |
| 323 | p = create_workqueue_thread(wq, 0); | 323 | p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map)); |
| 324 | if (!p) | 324 | if (!p) |
| 325 | destroy = 1; | 325 | destroy = 1; |
| 326 | else | 326 | else |
| @@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 374 | /* We don't need the distraction of CPUs appearing and vanishing. */ | 374 | /* We don't need the distraction of CPUs appearing and vanishing. */ |
| 375 | lock_cpu_hotplug(); | 375 | lock_cpu_hotplug(); |
| 376 | if (is_single_threaded(wq)) | 376 | if (is_single_threaded(wq)) |
| 377 | cleanup_workqueue_thread(wq, 0); | 377 | cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map)); |
| 378 | else { | 378 | else { |
| 379 | for_each_online_cpu(cpu) | 379 | for_each_online_cpu(cpu) |
| 380 | cleanup_workqueue_thread(wq, cpu); | 380 | cleanup_workqueue_thread(wq, cpu); |
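Every is_single_threaded() site in workqueue.c now resolves the same way, which matters on configurations where CPU 0 may be offline or absent (possible with CPU hotplug on some architectures). With cpu_online_map as the argument, any_online_cpu() simply yields the first online CPU, i.e. the first set bit in the mask. A toy model of that lookup, not the kernel implementation:

#include <stdio.h>

/* Toy stand-in for any_online_cpu() over a small bitmask;
 * the kernel operates on cpumask_t. */
static int first_online(unsigned long mask)
{
        int cpu;
        for (cpu = 0; cpu < (int)(8 * sizeof(mask)); cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return -1;      /* empty mask: cannot happen for cpu_online_map */
}

int main(void)
{
        /* CPUs 2 and 3 online, CPUs 0-1 offline: */
        printf("%d\n", first_online(0xcUL));    /* prints 2 */
        return 0;
}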
diff --git a/lib/genalloc.c b/lib/genalloc.c index d6d30d2e7166..9ce0a6a3b85a 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c | |||
| @@ -95,12 +95,10 @@ unsigned long gen_pool_alloc(struct gen_pool *poolp, int size) | |||
| 95 | if (size > max_chunk_size) | 95 | if (size > max_chunk_size) |
| 96 | return 0; | 96 | return 0; |
| 97 | 97 | ||
| 98 | i = 0; | ||
| 99 | |||
| 100 | size = max(size, 1 << ALLOC_MIN_SHIFT); | 98 | size = max(size, 1 << ALLOC_MIN_SHIFT); |
| 101 | s = roundup_pow_of_two(size); | 99 | i = fls(size - 1); |
| 102 | 100 | s = 1 << i; | |
| 103 | j = i; | 101 | j = i -= ALLOC_MIN_SHIFT; |
| 104 | 102 | ||
| 105 | spin_lock_irqsave(&poolp->lock, flags); | 103 | spin_lock_irqsave(&poolp->lock, flags); |
| 106 | while (!h[j].next) { | 104 | while (!h[j].next) { |
| @@ -153,10 +151,10 @@ void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size) | |||
| 153 | if (size > max_chunk_size) | 151 | if (size > max_chunk_size) |
| 154 | return; | 152 | return; |
| 155 | 153 | ||
| 156 | i = 0; | ||
| 157 | |||
| 158 | size = max(size, 1 << ALLOC_MIN_SHIFT); | 154 | size = max(size, 1 << ALLOC_MIN_SHIFT); |
| 159 | s = roundup_pow_of_two(size); | 155 | i = fls(size - 1); |
| 156 | s = 1 << i; | ||
| 157 | i -= ALLOC_MIN_SHIFT; | ||
| 160 | 158 | ||
| 161 | a = ptr; | 159 | a = ptr; |
| 162 | 160 | ||
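Both genalloc paths now derive the bucket index and the rounded-up size from a single fls(size - 1), which equals ceil(log2(size)) once the preceding max() clamp has forced size above 1. Checking the arithmetic in isolation:

#include <stdio.h>

/* User-space model of the kernel's 1-based fls() (find last set). */
static int fls_model(unsigned int x)
{
        int bit = 0;
        while (x) {
                x >>= 1;
                bit++;
        }
        return bit;
}

int main(void)
{
        unsigned int size = 3000;
        int i = fls_model(size - 1);    /* fls(2999) == 12 */
        unsigned int s = 1u << i;       /* 4096: rounded up to a power of two */

        printf("i=%d s=%u\n", i, s);
        return 0;
}

For an exact power of two the identity still holds: size = 4096 gives fls(4095) = 12 and s = 4096 again.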
diff --git a/mm/fremap.c b/mm/fremap.c index 007cbad9331e..f851775e09c2 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
| @@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 27 | struct page *page = NULL; | 27 | struct page *page = NULL; |
| 28 | 28 | ||
| 29 | if (pte_present(pte)) { | 29 | if (pte_present(pte)) { |
| 30 | unsigned long pfn = pte_pfn(pte); | 30 | flush_cache_page(vma, addr, pte_pfn(pte)); |
| 31 | flush_cache_page(vma, addr, pfn); | ||
| 32 | pte = ptep_clear_flush(vma, addr, ptep); | 31 | pte = ptep_clear_flush(vma, addr, ptep); |
| 33 | if (unlikely(!pfn_valid(pfn))) { | 32 | page = vm_normal_page(vma, addr, pte); |
| 34 | print_bad_pte(vma, pte, addr); | 33 | if (page) { |
| 35 | goto out; | 34 | if (pte_dirty(pte)) |
| 35 | set_page_dirty(page); | ||
| 36 | page_remove_rmap(page); | ||
| 37 | page_cache_release(page); | ||
| 36 | } | 38 | } |
| 37 | page = pfn_to_page(pfn); | ||
| 38 | if (pte_dirty(pte)) | ||
| 39 | set_page_dirty(page); | ||
| 40 | page_remove_rmap(page); | ||
| 41 | page_cache_release(page); | ||
| 42 | } else { | 39 | } else { |
| 43 | if (!pte_file(pte)) | 40 | if (!pte_file(pte)) |
| 44 | free_swap_and_cache(pte_to_swp_entry(pte)); | 41 | free_swap_and_cache(pte_to_swp_entry(pte)); |
| 45 | pte_clear(mm, addr, ptep); | 42 | pte_clear(mm, addr, ptep); |
| 46 | } | 43 | } |
| 47 | out: | ||
| 48 | return !!page; | 44 | return !!page; |
| 49 | } | 45 | } |
| 50 | 46 | ||
| @@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 65 | pte_t pte_val; | 61 | pte_t pte_val; |
| 66 | spinlock_t *ptl; | 62 | spinlock_t *ptl; |
| 67 | 63 | ||
| 68 | BUG_ON(vma->vm_flags & VM_UNPAGED); | ||
| 69 | |||
| 70 | pgd = pgd_offset(mm, addr); | 64 | pgd = pgd_offset(mm, addr); |
| 71 | pud = pud_alloc(mm, pgd, addr); | 65 | pud = pud_alloc(mm, pgd, addr); |
| 72 | if (!pud) | 66 | if (!pud) |
| @@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 122 | pte_t pte_val; | 116 | pte_t pte_val; |
| 123 | spinlock_t *ptl; | 117 | spinlock_t *ptl; |
| 124 | 118 | ||
| 125 | BUG_ON(vma->vm_flags & VM_UNPAGED); | ||
| 126 | |||
| 127 | pgd = pgd_offset(mm, addr); | 119 | pgd = pgd_offset(mm, addr); |
| 128 | pud = pud_alloc(mm, pgd, addr); | 120 | pud = pud_alloc(mm, pgd, addr); |
| 129 | if (!pud) | 121 | if (!pud) |
diff --git a/mm/madvise.c b/mm/madvise.c index 328a3bcce527..2b7cf0400a21 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
| @@ -126,7 +126,7 @@ static long madvise_dontneed(struct vm_area_struct * vma, | |||
| 126 | unsigned long start, unsigned long end) | 126 | unsigned long start, unsigned long end) |
| 127 | { | 127 | { |
| 128 | *prev = vma; | 128 | *prev = vma; |
| 129 | if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_UNPAGED)) | 129 | if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) |
| 130 | return -EINVAL; | 130 | return -EINVAL; |
| 131 | 131 | ||
| 132 | if (unlikely(vma->vm_flags & VM_NONLINEAR)) { | 132 | if (unlikely(vma->vm_flags & VM_NONLINEAR)) { |
diff --git a/mm/memory.c b/mm/memory.c index d1f46f4e4c8a..9ab206b829a2 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -333,9 +333,9 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss) | |||
| 333 | } | 333 | } |
| 334 | 334 | ||
| 335 | /* | 335 | /* |
| 336 | * This function is called to print an error when a pte in a | 336 | * This function is called to print an error when a bad pte |
| 337 | * !VM_UNPAGED region is found pointing to an invalid pfn (which | 337 | * is found. For example, we might have a PFN-mapped pte in |
| 338 | * is an error. | 338 | * a region that doesn't allow it. |
| 339 | * | 339 | * |
| 340 | * The calling function must still handle the error. | 340 | * The calling function must still handle the error. |
| 341 | */ | 341 | */ |
| @@ -350,19 +350,56 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr) | |||
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | /* | 352 | /* |
| 353 | * page_is_anon applies strict checks for an anonymous page belonging to | 353 | * This function gets the "struct page" associated with a pte. |
| 354 | * this vma at this address. It is used on VM_UNPAGED vmas, which are | 354 | * |
| 355 | * usually populated with shared originals (which must not be counted), | 355 | * NOTE! Some mappings do not have "struct pages". A raw PFN mapping |
| 356 | * but occasionally contain private COWed copies (when !VM_SHARED, or | 356 | * will have each page table entry just pointing to a raw page frame |
| 357 | * perhaps via ptrace when VM_SHARED). An mmap of /dev/mem might window | 357 | * number, and as far as the VM layer is concerned, those do not have |
| 358 | * free pages, pages from other processes, or from other parts of this: | 358 | * pages associated with them - even if the PFN might point to memory |
| 359 | * it's tricky, but try not to be deceived by foreign anonymous pages. | 359 | * that otherwise is perfectly fine and has a "struct page". |
| 360 | * | ||
| 361 | * The way we recognize those mappings is through the rules set up | ||
| 362 | * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set, | ||
| 363 | * and the vm_pgoff will point to the first PFN mapped: thus every | ||
| 364 | * page that is a raw mapping will always honor the rule | ||
| 365 | * | ||
| 366 | * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT) | ||
| 367 | * | ||
| 368 | * and if that isn't true, the page has been COW'ed (in which case it | ||
| 369 | * _does_ have a "struct page" associated with it even if it is in a | ||
| 370 | * VM_PFNMAP range). | ||
| 360 | */ | 371 | */ |
| 361 | static inline int page_is_anon(struct page *page, | 372 | struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte) |
| 362 | struct vm_area_struct *vma, unsigned long addr) | ||
| 363 | { | 373 | { |
| 364 | return page && PageAnon(page) && page_mapped(page) && | 374 | unsigned long pfn = pte_pfn(pte); |
| 365 | page_address_in_vma(page, vma) == addr; | 375 | |
| 376 | if (vma->vm_flags & VM_PFNMAP) { | ||
| 377 | unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT; | ||
| 378 | if (pfn == vma->vm_pgoff + off) | ||
| 379 | return NULL; | ||
| 380 | } | ||
| 381 | |||
| 382 | /* | ||
| 383 | * Add some anal sanity checks for now. Eventually, | ||
| 384 | * we should just do "return pfn_to_page(pfn)", but | ||
| 385 | * in the meantime we check that we get a valid pfn, | ||
| 386 | * and that the resulting page looks ok. | ||
| 387 | * | ||
| 388 | * Remove this test eventually! | ||
| 389 | */ | ||
| 390 | if (unlikely(!pfn_valid(pfn))) { | ||
| 391 | print_bad_pte(vma, pte, addr); | ||
| 392 | return NULL; | ||
| 393 | } | ||
| 394 | |||
| 395 | /* | ||
| 396 | * NOTE! We still have PageReserved() pages in the page | ||
| 397 | * tables. | ||
| 398 | * | ||
| 399 | * The PAGE_ZERO() pages and various VDSO mappings can | ||
| 400 | * cause them to exist. | ||
| 401 | */ | ||
| 402 | return pfn_to_page(pfn); | ||
| 366 | } | 403 | } |
| 367 | 404 | ||
| 368 | /* | 405 | /* |
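The pgoff identity in the new comment is worth seeing with numbers: remap_pfn_range() stores the mapping's first PFN in vm_pgoff, so a pte is a raw frame exactly when its PFN sits at the linear offset for its address, and anything else must be a COWed page with a real struct page. A hedged model of just that test (struct and values invented):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Minimal model of the VM_PFNMAP linearity check from the diff. */
struct toy_vma {
        unsigned long vm_start;
        unsigned long vm_pgoff;     /* first PFN of the linear mapping */
};

static int is_raw_pfn(const struct toy_vma *vma,
                      unsigned long addr, unsigned long pfn)
{
        unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
        return pfn == vma->vm_pgoff + off;  /* linear: no struct page */
}

int main(void)
{
        struct toy_vma vma = { .vm_start = 0x40000000, .vm_pgoff = 0x80000 };

        /* Second page still maps PFN 0x80001: raw, vm_normal_page -> NULL. */
        printf("%d\n", is_raw_pfn(&vma, 0x40001000, 0x80001));  /* 1 */
        /* Same address pointing somewhere else: it was COWed. */
        printf("%d\n", is_raw_pfn(&vma, 0x40001000, 0x12345));  /* 0 */
        return 0;
}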
| @@ -379,7 +416,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 379 | unsigned long vm_flags = vma->vm_flags; | 416 | unsigned long vm_flags = vma->vm_flags; |
| 380 | pte_t pte = *src_pte; | 417 | pte_t pte = *src_pte; |
| 381 | struct page *page; | 418 | struct page *page; |
| 382 | unsigned long pfn; | ||
| 383 | 419 | ||
| 384 | /* pte contains position in swap or file, so copy. */ | 420 | /* pte contains position in swap or file, so copy. */ |
| 385 | if (unlikely(!pte_present(pte))) { | 421 | if (unlikely(!pte_present(pte))) { |
| @@ -397,22 +433,6 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 397 | goto out_set_pte; | 433 | goto out_set_pte; |
| 398 | } | 434 | } |
| 399 | 435 | ||
| 400 | pfn = pte_pfn(pte); | ||
| 401 | page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; | ||
| 402 | |||
| 403 | if (unlikely(vm_flags & VM_UNPAGED)) | ||
| 404 | if (!page_is_anon(page, vma, addr)) | ||
| 405 | goto out_set_pte; | ||
| 406 | |||
| 407 | /* | ||
| 408 | * If the pte points outside of valid memory but | ||
| 409 | * the region is not VM_UNPAGED, we have a problem. | ||
| 410 | */ | ||
| 411 | if (unlikely(!page)) { | ||
| 412 | print_bad_pte(vma, pte, addr); | ||
| 413 | goto out_set_pte; /* try to do something sane */ | ||
| 414 | } | ||
| 415 | |||
| 416 | /* | 436 | /* |
| 417 | * If it's a COW mapping, write protect it both | 437 | * If it's a COW mapping, write protect it both |
| 418 | * in the parent and the child | 438 | * in the parent and the child |
| @@ -429,9 +449,13 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 429 | if (vm_flags & VM_SHARED) | 449 | if (vm_flags & VM_SHARED) |
| 430 | pte = pte_mkclean(pte); | 450 | pte = pte_mkclean(pte); |
| 431 | pte = pte_mkold(pte); | 451 | pte = pte_mkold(pte); |
| 432 | get_page(page); | 452 | |
| 433 | page_dup_rmap(page); | 453 | page = vm_normal_page(vma, addr, pte); |
| 434 | rss[!!PageAnon(page)]++; | 454 | if (page) { |
| 455 | get_page(page); | ||
| 456 | page_dup_rmap(page); | ||
| 457 | rss[!!PageAnon(page)]++; | ||
| 458 | } | ||
| 435 | 459 | ||
| 436 | out_set_pte: | 460 | out_set_pte: |
| 437 | set_pte_at(dst_mm, addr, dst_pte, pte); | 461 | set_pte_at(dst_mm, addr, dst_pte, pte); |
| @@ -543,7 +567,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, | |||
| 543 | * readonly mappings. The tradeoff is that copy_page_range is more | 567 | * readonly mappings. The tradeoff is that copy_page_range is more |
| 544 | * efficient than faulting. | 568 | * efficient than faulting. |
| 545 | */ | 569 | */ |
| 546 | if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) { | 570 | if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) { |
| 547 | if (!vma->anon_vma) | 571 | if (!vma->anon_vma) |
| 548 | return 0; | 572 | return 0; |
| 549 | } | 573 | } |
| @@ -584,19 +608,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, | |||
| 584 | } | 608 | } |
| 585 | if (pte_present(ptent)) { | 609 | if (pte_present(ptent)) { |
| 586 | struct page *page; | 610 | struct page *page; |
| 587 | unsigned long pfn; | ||
| 588 | 611 | ||
| 589 | (*zap_work) -= PAGE_SIZE; | 612 | (*zap_work) -= PAGE_SIZE; |
| 590 | 613 | ||
| 591 | pfn = pte_pfn(ptent); | 614 | page = vm_normal_page(vma, addr, ptent); |
| 592 | page = pfn_valid(pfn)? pfn_to_page(pfn): NULL; | ||
| 593 | |||
| 594 | if (unlikely(vma->vm_flags & VM_UNPAGED)) { | ||
| 595 | if (!page_is_anon(page, vma, addr)) | ||
| 596 | page = NULL; | ||
| 597 | } else if (unlikely(!page)) | ||
| 598 | print_bad_pte(vma, ptent, addr); | ||
| 599 | |||
| 600 | if (unlikely(details) && page) { | 615 | if (unlikely(details) && page) { |
| 601 | /* | 616 | /* |
| 602 | * unmap_shared_mapping_pages() wants to | 617 | * unmap_shared_mapping_pages() wants to |
| @@ -852,7 +867,7 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address, | |||
| 852 | /* | 867 | /* |
| 853 | * Do a quick page-table lookup for a single page. | 868 | * Do a quick page-table lookup for a single page. |
| 854 | */ | 869 | */ |
| 855 | struct page *follow_page(struct mm_struct *mm, unsigned long address, | 870 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
| 856 | unsigned int flags) | 871 | unsigned int flags) |
| 857 | { | 872 | { |
| 858 | pgd_t *pgd; | 873 | pgd_t *pgd; |
| @@ -860,8 +875,8 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address, | |||
| 860 | pmd_t *pmd; | 875 | pmd_t *pmd; |
| 861 | pte_t *ptep, pte; | 876 | pte_t *ptep, pte; |
| 862 | spinlock_t *ptl; | 877 | spinlock_t *ptl; |
| 863 | unsigned long pfn; | ||
| 864 | struct page *page; | 878 | struct page *page; |
| 879 | struct mm_struct *mm = vma->vm_mm; | ||
| 865 | 880 | ||
| 866 | page = follow_huge_addr(mm, address, flags & FOLL_WRITE); | 881 | page = follow_huge_addr(mm, address, flags & FOLL_WRITE); |
| 867 | if (!IS_ERR(page)) { | 882 | if (!IS_ERR(page)) { |
| @@ -897,11 +912,10 @@ struct page *follow_page(struct mm_struct *mm, unsigned long address, | |||
| 897 | goto unlock; | 912 | goto unlock; |
| 898 | if ((flags & FOLL_WRITE) && !pte_write(pte)) | 913 | if ((flags & FOLL_WRITE) && !pte_write(pte)) |
| 899 | goto unlock; | 914 | goto unlock; |
| 900 | pfn = pte_pfn(pte); | 915 | page = vm_normal_page(vma, address, pte); |
| 901 | if (!pfn_valid(pfn)) | 916 | if (unlikely(!page)) |
| 902 | goto unlock; | 917 | goto unlock; |
| 903 | 918 | ||
| 904 | page = pfn_to_page(pfn); | ||
| 905 | if (flags & FOLL_GET) | 919 | if (flags & FOLL_GET) |
| 906 | get_page(page); | 920 | get_page(page); |
| 907 | if (flags & FOLL_TOUCH) { | 921 | if (flags & FOLL_TOUCH) { |
| @@ -974,8 +988,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 974 | return i ? : -EFAULT; | 988 | return i ? : -EFAULT; |
| 975 | } | 989 | } |
| 976 | if (pages) { | 990 | if (pages) { |
| 977 | pages[i] = pte_page(*pte); | 991 | struct page *page = vm_normal_page(vma, start, *pte); |
| 978 | get_page(pages[i]); | 992 | pages[i] = page; |
| 993 | if (page) | ||
| 994 | get_page(page); | ||
| 979 | } | 995 | } |
| 980 | pte_unmap(pte); | 996 | pte_unmap(pte); |
| 981 | if (vmas) | 997 | if (vmas) |
| @@ -1010,7 +1026,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
| 1010 | foll_flags |= FOLL_WRITE; | 1026 | foll_flags |= FOLL_WRITE; |
| 1011 | 1027 | ||
| 1012 | cond_resched(); | 1028 | cond_resched(); |
| 1013 | while (!(page = follow_page(mm, start, foll_flags))) { | 1029 | while (!(page = follow_page(vma, start, foll_flags))) { |
| 1014 | int ret; | 1030 | int ret; |
| 1015 | ret = __handle_mm_fault(mm, vma, start, | 1031 | ret = __handle_mm_fault(mm, vma, start, |
| 1016 | foll_flags & FOLL_WRITE); | 1032 | foll_flags & FOLL_WRITE); |
| @@ -1214,11 +1230,12 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, | |||
| 1214 | * in 2.6 the LRU scan won't even find its pages, so this | 1230 | * in 2.6 the LRU scan won't even find its pages, so this |
| 1215 | * flag means no more than count its pages in reserved_vm, | 1231 | * flag means no more than count its pages in reserved_vm, |
| 1216 | * and omit it from core dump, even when VM_IO turned off. | 1232 | * and omit it from core dump, even when VM_IO turned off. |
| 1217 | * VM_UNPAGED tells the core MM not to "manage" these pages | 1233 | * VM_PFNMAP tells the core MM that the base pages are just |
| 1218 | * (e.g. refcount, mapcount, try to swap them out): in | 1234 | * raw PFN mappings, and do not have a "struct page" associated |
| 1219 | * particular, zap_pte_range does not try to free them. | 1235 | * with them. |
| 1220 | */ | 1236 | */ |
| 1221 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED; | 1237 | vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; |
| 1238 | vma->vm_pgoff = pfn; | ||
| 1222 | 1239 | ||
| 1223 | BUG_ON(addr >= end); | 1240 | BUG_ON(addr >= end); |
| 1224 | pfn -= addr >> PAGE_SHIFT; | 1241 | pfn -= addr >> PAGE_SHIFT; |
| @@ -1273,6 +1290,26 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma) | |||
| 1273 | return pte; | 1290 | return pte; |
| 1274 | } | 1291 | } |
| 1275 | 1292 | ||
| 1293 | static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va) | ||
| 1294 | { | ||
| 1295 | /* | ||
| 1296 | * If the source page was a PFN mapping, we don't have | ||
| 1297 | * a "struct page" for it. We do a best-effort copy by | ||
| 1298 | * just copying from the original user address. If that | ||
| 1299 | * fails, we just zero-fill it. Live with it. | ||
| 1300 | */ | ||
| 1301 | if (unlikely(!src)) { | ||
| 1302 | void *kaddr = kmap_atomic(dst, KM_USER0); | ||
| 1303 | unsigned long left = __copy_from_user_inatomic(kaddr, (void __user *)va, PAGE_SIZE); | ||
| 1304 | if (left) | ||
| 1305 | memset(kaddr, 0, PAGE_SIZE); | ||
| 1306 | kunmap_atomic(kaddr, KM_USER0); | ||
| 1307 | return; | ||
| 1308 | |||
| 1309 | } | ||
| 1310 | copy_user_highpage(dst, src, va); | ||
| 1311 | } | ||
| 1312 | |||
| 1276 | /* | 1313 | /* |
| 1277 | * This routine handles present pages, when users try to write | 1314 | * This routine handles present pages, when users try to write |
| 1278 | * to a shared page. It is done by copying the page to a new address | 1315 | * to a shared page. It is done by copying the page to a new address |
| @@ -1296,28 +1333,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1296 | spinlock_t *ptl, pte_t orig_pte) | 1333 | spinlock_t *ptl, pte_t orig_pte) |
| 1297 | { | 1334 | { |
| 1298 | struct page *old_page, *src_page, *new_page; | 1335 | struct page *old_page, *src_page, *new_page; |
| 1299 | unsigned long pfn = pte_pfn(orig_pte); | ||
| 1300 | pte_t entry; | 1336 | pte_t entry; |
| 1301 | int ret = VM_FAULT_MINOR; | 1337 | int ret = VM_FAULT_MINOR; |
| 1302 | 1338 | ||
| 1303 | if (unlikely(!pfn_valid(pfn))) { | 1339 | old_page = vm_normal_page(vma, address, orig_pte); |
| 1304 | /* | ||
| 1305 | * Page table corrupted: show pte and kill process. | ||
| 1306 | * Or it's an attempt to COW an out-of-map VM_UNPAGED | ||
| 1307 | * entry, which copy_user_highpage does not support. | ||
| 1308 | */ | ||
| 1309 | print_bad_pte(vma, orig_pte, address); | ||
| 1310 | ret = VM_FAULT_OOM; | ||
| 1311 | goto unlock; | ||
| 1312 | } | ||
| 1313 | old_page = pfn_to_page(pfn); | ||
| 1314 | src_page = old_page; | 1340 | src_page = old_page; |
| 1315 | 1341 | if (!old_page) | |
| 1316 | if (unlikely(vma->vm_flags & VM_UNPAGED)) | 1342 | goto gotten; |
| 1317 | if (!page_is_anon(old_page, vma, address)) { | ||
| 1318 | old_page = NULL; | ||
| 1319 | goto gotten; | ||
| 1320 | } | ||
| 1321 | 1343 | ||
| 1322 | if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { | 1344 | if (PageAnon(old_page) && !TestSetPageLocked(old_page)) { |
| 1323 | int reuse = can_share_swap_page(old_page); | 1345 | int reuse = can_share_swap_page(old_page); |
| @@ -1351,7 +1373,7 @@ gotten: | |||
| 1351 | new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); | 1373 | new_page = alloc_page_vma(GFP_HIGHUSER, vma, address); |
| 1352 | if (!new_page) | 1374 | if (!new_page) |
| 1353 | goto oom; | 1375 | goto oom; |
| 1354 | copy_user_highpage(new_page, src_page, address); | 1376 | cow_user_page(new_page, src_page, address); |
| 1355 | } | 1377 | } |
| 1356 | 1378 | ||
| 1357 | /* | 1379 | /* |
| @@ -1812,16 +1834,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1812 | spinlock_t *ptl; | 1834 | spinlock_t *ptl; |
| 1813 | pte_t entry; | 1835 | pte_t entry; |
| 1814 | 1836 | ||
| 1815 | /* | 1837 | if (write_access) { |
| 1816 | * A VM_UNPAGED vma will normally be filled with present ptes | ||
| 1817 | * by remap_pfn_range, and never arrive here; but it might have | ||
| 1818 | * holes, or if !VM_DONTEXPAND, mremap might have expanded it. | ||
| 1819 | * It's weird enough handling anon pages in unpaged vmas, we do | ||
| 1820 | * not want to worry about ZERO_PAGEs too (it may or may not | ||
| 1821 | * matter if their counts wrap): just give them anon pages. | ||
| 1822 | */ | ||
| 1823 | |||
| 1824 | if (write_access || (vma->vm_flags & VM_UNPAGED)) { | ||
| 1825 | /* Allocate our own private page. */ | 1838 | /* Allocate our own private page. */ |
| 1826 | pte_unmap(page_table); | 1839 | pte_unmap(page_table); |
| 1827 | 1840 | ||
| @@ -1896,8 +1909,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 1896 | int anon = 0; | 1909 | int anon = 0; |
| 1897 | 1910 | ||
| 1898 | pte_unmap(page_table); | 1911 | pte_unmap(page_table); |
| 1899 | BUG_ON(vma->vm_flags & VM_UNPAGED); | ||
| 1900 | |||
| 1901 | if (vma->vm_file) { | 1912 | if (vma->vm_file) { |
| 1902 | mapping = vma->vm_file->f_mapping; | 1913 | mapping = vma->vm_file->f_mapping; |
| 1903 | sequence = mapping->truncate_count; | 1914 | sequence = mapping->truncate_count; |
| @@ -1930,7 +1941,7 @@ retry: | |||
| 1930 | page = alloc_page_vma(GFP_HIGHUSER, vma, address); | 1941 | page = alloc_page_vma(GFP_HIGHUSER, vma, address); |
| 1931 | if (!page) | 1942 | if (!page) |
| 1932 | goto oom; | 1943 | goto oom; |
| 1933 | copy_user_highpage(page, new_page, address); | 1944 | cow_user_page(page, new_page, address); |
| 1934 | page_cache_release(new_page); | 1945 | page_cache_release(new_page); |
| 1935 | new_page = page; | 1946 | new_page = page; |
| 1936 | anon = 1; | 1947 | anon = 1; |
| @@ -2149,6 +2160,12 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | |||
| 2149 | spin_unlock(&mm->page_table_lock); | 2160 | spin_unlock(&mm->page_table_lock); |
| 2150 | return 0; | 2161 | return 0; |
| 2151 | } | 2162 | } |
| 2163 | #else | ||
| 2164 | /* Workaround for gcc 2.96 */ | ||
| 2165 | int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address) | ||
| 2166 | { | ||
| 2167 | return 0; | ||
| 2168 | } | ||
| 2152 | #endif /* __PAGETABLE_PUD_FOLDED */ | 2169 | #endif /* __PAGETABLE_PUD_FOLDED */ |
| 2153 | 2170 | ||
| 2154 | #ifndef __PAGETABLE_PMD_FOLDED | 2171 | #ifndef __PAGETABLE_PMD_FOLDED |
| @@ -2177,6 +2194,12 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | |||
| 2177 | spin_unlock(&mm->page_table_lock); | 2194 | spin_unlock(&mm->page_table_lock); |
| 2178 | return 0; | 2195 | return 0; |
| 2179 | } | 2196 | } |
| 2197 | #else | ||
| 2198 | /* Workaround for gcc 2.96 */ | ||
| 2199 | int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address) | ||
| 2200 | { | ||
| 2201 | return 0; | ||
| 2202 | } | ||
| 2180 | #endif /* __PAGETABLE_PMD_FOLDED */ | 2203 | #endif /* __PAGETABLE_PMD_FOLDED */ |
| 2181 | 2204 | ||
| 2182 | int make_pages_present(unsigned long addr, unsigned long end) | 2205 | int make_pages_present(unsigned long addr, unsigned long end) |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 5609a31bdf22..bec88c81244e 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -189,17 +189,15 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 189 | 189 | ||
| 190 | orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 190 | orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
| 191 | do { | 191 | do { |
| 192 | unsigned long pfn; | 192 | struct page *page; |
| 193 | unsigned int nid; | 193 | unsigned int nid; |
| 194 | 194 | ||
| 195 | if (!pte_present(*pte)) | 195 | if (!pte_present(*pte)) |
| 196 | continue; | 196 | continue; |
| 197 | pfn = pte_pfn(*pte); | 197 | page = vm_normal_page(vma, addr, *pte); |
| 198 | if (!pfn_valid(pfn)) { | 198 | if (!page) |
| 199 | print_bad_pte(vma, *pte, addr); | ||
| 200 | continue; | 199 | continue; |
| 201 | } | 200 | nid = page_to_nid(page); |
| 202 | nid = pfn_to_nid(pfn); | ||
| 203 | if (!node_isset(nid, *nodes)) | 201 | if (!node_isset(nid, *nodes)) |
| 204 | break; | 202 | break; |
| 205 | } while (pte++, addr += PAGE_SIZE, addr != end); | 203 | } while (pte++, addr += PAGE_SIZE, addr != end); |
| @@ -269,8 +267,6 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end, | |||
| 269 | first = find_vma(mm, start); | 267 | first = find_vma(mm, start); |
| 270 | if (!first) | 268 | if (!first) |
| 271 | return ERR_PTR(-EFAULT); | 269 | return ERR_PTR(-EFAULT); |
| 272 | if (first->vm_flags & VM_UNPAGED) | ||
| 273 | return ERR_PTR(-EACCES); | ||
| 274 | prev = NULL; | 270 | prev = NULL; |
| 275 | for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { | 271 | for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { |
| 276 | if (!vma->vm_next && vma->vm_end < end) | 272 | if (!vma->vm_next && vma->vm_end < end) |
diff --git a/mm/msync.c b/mm/msync.c index b3f4caf3010b..1b5b6f662dcf 100644 --- a/mm/msync.c +++ b/mm/msync.c | |||
| @@ -27,7 +27,6 @@ static void msync_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
| 27 | again: | 27 | again: |
| 28 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 28 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
| 29 | do { | 29 | do { |
| 30 | unsigned long pfn; | ||
| 31 | struct page *page; | 30 | struct page *page; |
| 32 | 31 | ||
| 33 | if (progress >= 64) { | 32 | if (progress >= 64) { |
| @@ -40,13 +39,9 @@ again: | |||
| 40 | continue; | 39 | continue; |
| 41 | if (!pte_maybe_dirty(*pte)) | 40 | if (!pte_maybe_dirty(*pte)) |
| 42 | continue; | 41 | continue; |
| 43 | pfn = pte_pfn(*pte); | 42 | page = vm_normal_page(vma, addr, *pte); |
| 44 | if (unlikely(!pfn_valid(pfn))) { | 43 | if (!page) |
| 45 | print_bad_pte(vma, *pte, addr); | ||
| 46 | continue; | 44 | continue; |
| 47 | } | ||
| 48 | page = pfn_to_page(pfn); | ||
| 49 | |||
| 50 | if (ptep_clear_flush_dirty(vma, addr, pte) || | 45 | if (ptep_clear_flush_dirty(vma, addr, pte) || |
| 51 | page_test_and_clear_dirty(page)) | 46 | page_test_and_clear_dirty(page)) |
| 52 | set_page_dirty(page); | 47 | set_page_dirty(page); |
| @@ -97,9 +92,8 @@ static void msync_page_range(struct vm_area_struct *vma, | |||
| 97 | /* For hugepages we can't go walking the page table normally, | 92 | /* For hugepages we can't go walking the page table normally, |
| 98 | * but that's ok, hugetlbfs is memory based, so we don't need | 93 | * but that's ok, hugetlbfs is memory based, so we don't need |
| 99 | * to do anything more on an msync(). | 94 | * to do anything more on an msync(). |
| 100 | * Can't do anything with VM_UNPAGED regions either. | ||
| 101 | */ | 95 | */ |
| 102 | if (vma->vm_flags & (VM_HUGETLB|VM_UNPAGED)) | 96 | if (vma->vm_flags & VM_HUGETLB) |
| 103 | return; | 97 | return; |
| 104 | 98 | ||
| 105 | BUG_ON(addr >= end); | 99 | BUG_ON(addr >= end); |
diff --git a/mm/nommu.c b/mm/nommu.c index 6deb6ab3d6ad..c1196812876b 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
| @@ -1045,7 +1045,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) | |||
| 1045 | 1045 | ||
| 1046 | EXPORT_SYMBOL(find_vma); | 1046 | EXPORT_SYMBOL(find_vma); |
| 1047 | 1047 | ||
| 1048 | struct page *follow_page(struct mm_struct *mm, unsigned long address, | 1048 | struct page *follow_page(struct vm_area_struct *vma, unsigned long address, |
| 1049 | unsigned int foll_flags) | 1049 | unsigned int foll_flags) |
| 1050 | { | 1050 | { |
| 1051 | return NULL; | 1051 | return NULL; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1731236dec35..b257720edfc8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -773,9 +773,12 @@ again: | |||
| 773 | } | 773 | } |
| 774 | 774 | ||
| 775 | #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ | 775 | #define ALLOC_NO_WATERMARKS 0x01 /* don't check watermarks at all */ |
| 776 | #define ALLOC_HARDER 0x02 /* try to alloc harder */ | 776 | #define ALLOC_WMARK_MIN 0x02 /* use pages_min watermark */ |
| 777 | #define ALLOC_HIGH 0x04 /* __GFP_HIGH set */ | 777 | #define ALLOC_WMARK_LOW 0x04 /* use pages_low watermark */ |
| 778 | #define ALLOC_CPUSET 0x08 /* check for correct cpuset */ | 778 | #define ALLOC_WMARK_HIGH 0x08 /* use pages_high watermark */ |
| 779 | #define ALLOC_HARDER 0x10 /* try to alloc harder */ | ||
| 780 | #define ALLOC_HIGH 0x20 /* __GFP_HIGH set */ | ||
| 781 | #define ALLOC_CPUSET 0x40 /* check for correct cpuset */ | ||
| 779 | 782 | ||
| 780 | /* | 783 | /* |
| 781 | * Return 1 if free pages are above 'mark'. This takes into account the order | 784 | * Return 1 if free pages are above 'mark'. This takes into account the order |
| @@ -830,7 +833,14 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, | |||
| 830 | continue; | 833 | continue; |
| 831 | 834 | ||
| 832 | if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { | 835 | if (!(alloc_flags & ALLOC_NO_WATERMARKS)) { |
| 833 | if (!zone_watermark_ok(*z, order, (*z)->pages_low, | 836 | unsigned long mark; |
| 837 | if (alloc_flags & ALLOC_WMARK_MIN) | ||
| 838 | mark = (*z)->pages_min; | ||
| 839 | else if (alloc_flags & ALLOC_WMARK_LOW) | ||
| 840 | mark = (*z)->pages_low; | ||
| 841 | else | ||
| 842 | mark = (*z)->pages_high; | ||
| 843 | if (!zone_watermark_ok(*z, order, mark, | ||
| 834 | classzone_idx, alloc_flags)) | 844 | classzone_idx, alloc_flags)) |
| 835 | continue; | 845 | continue; |
| 836 | } | 846 | } |
| @@ -871,7 +881,7 @@ restart: | |||
| 871 | } | 881 | } |
| 872 | 882 | ||
| 873 | page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, | 883 | page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, |
| 874 | zonelist, ALLOC_CPUSET); | 884 | zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET); |
| 875 | if (page) | 885 | if (page) |
| 876 | goto got_pg; | 886 | goto got_pg; |
| 877 | 887 | ||
| @@ -888,7 +898,7 @@ restart: | |||
| 888 | * cannot run direct reclaim, or if the caller has realtime scheduling | 898 | * cannot run direct reclaim, or if the caller has realtime scheduling |
| 889 | * policy. | 899 | * policy. |
| 890 | */ | 900 | */ |
| 891 | alloc_flags = 0; | 901 | alloc_flags = ALLOC_WMARK_MIN; |
| 892 | if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) | 902 | if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait) |
| 893 | alloc_flags |= ALLOC_HARDER; | 903 | alloc_flags |= ALLOC_HARDER; |
| 894 | if (gfp_mask & __GFP_HIGH) | 904 | if (gfp_mask & __GFP_HIGH) |
| @@ -959,7 +969,7 @@ rebalance: | |||
| 959 | * under heavy pressure. | 969 | * under heavy pressure. |
| 960 | */ | 970 | */ |
| 961 | page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, | 971 | page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order, |
| 962 | zonelist, ALLOC_CPUSET); | 972 | zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET); |
| 963 | if (page) | 973 | if (page) |
| 964 | goto got_pg; | 974 | goto got_pg; |
| 965 | 975 | ||
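The watermark bits turn what used to be an implicit choice (always pages_low) into an explicit one per call site: the first-pass allocation asks for ALLOC_WMARK_LOW, the reclaim-capable path drops to ALLOC_WMARK_MIN, and the "under heavy pressure" retry probes ALLOC_WMARK_HIGH. The selection shown in the hunk boils down to the following (flag values from the diff, scaffolding invented):

#include <stdio.h>

#define ALLOC_WMARK_MIN  0x02
#define ALLOC_WMARK_LOW  0x04
#define ALLOC_WMARK_HIGH 0x08

struct toy_zone {
        unsigned long pages_min, pages_low, pages_high;
};

static unsigned long pick_mark(const struct toy_zone *z, int alloc_flags)
{
        if (alloc_flags & ALLOC_WMARK_MIN)
                return z->pages_min;
        if (alloc_flags & ALLOC_WMARK_LOW)
                return z->pages_low;
        return z->pages_high;       /* ALLOC_WMARK_HIGH */
}

int main(void)
{
        struct toy_zone z = { .pages_min = 32, .pages_low = 40, .pages_high = 48 };

        printf("%lu\n", pick_mark(&z, ALLOC_WMARK_LOW));    /* 40 */
        return 0;
}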
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
| @@ -226,8 +226,6 @@ vma_address(struct page *page, struct vm_area_struct *vma) | |||
| 226 | /* | 226 | /* |
| 227 | * At what user virtual address is page expected in vma? checking that the | 227 | * At what user virtual address is page expected in vma? checking that the |
| 228 | * page matches the vma: currently only used on anon pages, by unuse_vma; | 228 | * page matches the vma: currently only used on anon pages, by unuse_vma; |
| 229 | * and by extraordinary checks on anon pages in VM_UNPAGED vmas, taking | ||
| 230 | * care that an mmap of /dev/mem might window free and foreign pages. | ||
| 231 | */ | 229 | */ |
| 232 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) | 230 | unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) |
| 233 | { | 231 | { |
| @@ -292,7 +290,7 @@ pte_t *page_check_address(struct page *page, struct mm_struct *mm, | |||
| 292 | * repeatedly from either page_referenced_anon or page_referenced_file. | 290 | * repeatedly from either page_referenced_anon or page_referenced_file. |
| 293 | */ | 291 | */ |
| 294 | static int page_referenced_one(struct page *page, | 292 | static int page_referenced_one(struct page *page, |
| 295 | struct vm_area_struct *vma, unsigned int *mapcount, int ignore_token) | 293 | struct vm_area_struct *vma, unsigned int *mapcount) |
| 296 | { | 294 | { |
| 297 | struct mm_struct *mm = vma->vm_mm; | 295 | struct mm_struct *mm = vma->vm_mm; |
| 298 | unsigned long address; | 296 | unsigned long address; |
| @@ -313,7 +311,7 @@ static int page_referenced_one(struct page *page, | |||
| 313 | 311 | ||
| 314 | /* Pretend the page is referenced if the task has the | 312 | /* Pretend the page is referenced if the task has the |
| 315 | swap token and is in the middle of a page fault. */ | 313 | swap token and is in the middle of a page fault. */ |
| 316 | if (mm != current->mm && !ignore_token && has_swap_token(mm) && | 314 | if (mm != current->mm && has_swap_token(mm) && |
| 317 | rwsem_is_locked(&mm->mmap_sem)) | 315 | rwsem_is_locked(&mm->mmap_sem)) |
| 318 | referenced++; | 316 | referenced++; |
| 319 | 317 | ||
| @@ -323,7 +321,7 @@ out: | |||
| 323 | return referenced; | 321 | return referenced; |
| 324 | } | 322 | } |
| 325 | 323 | ||
| 326 | static int page_referenced_anon(struct page *page, int ignore_token) | 324 | static int page_referenced_anon(struct page *page) |
| 327 | { | 325 | { |
| 328 | unsigned int mapcount; | 326 | unsigned int mapcount; |
| 329 | struct anon_vma *anon_vma; | 327 | struct anon_vma *anon_vma; |
| @@ -336,8 +334,7 @@ static int page_referenced_anon(struct page *page, int ignore_token) | |||
| 336 | 334 | ||
| 337 | mapcount = page_mapcount(page); | 335 | mapcount = page_mapcount(page); |
| 338 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { | 336 | list_for_each_entry(vma, &anon_vma->head, anon_vma_node) { |
| 339 | referenced += page_referenced_one(page, vma, &mapcount, | 337 | referenced += page_referenced_one(page, vma, &mapcount); |
| 340 | ignore_token); | ||
| 341 | if (!mapcount) | 338 | if (!mapcount) |
| 342 | break; | 339 | break; |
| 343 | } | 340 | } |
| @@ -356,7 +353,7 @@ static int page_referenced_anon(struct page *page, int ignore_token) | |||
| 356 | * | 353 | * |
| 357 | * This function is only called from page_referenced for object-based pages. | 354 | * This function is only called from page_referenced for object-based pages. |
| 358 | */ | 355 | */ |
| 359 | static int page_referenced_file(struct page *page, int ignore_token) | 356 | static int page_referenced_file(struct page *page) |
| 360 | { | 357 | { |
| 361 | unsigned int mapcount; | 358 | unsigned int mapcount; |
| 362 | struct address_space *mapping = page->mapping; | 359 | struct address_space *mapping = page->mapping; |
| @@ -394,8 +391,7 @@ static int page_referenced_file(struct page *page, int ignore_token) | |||
| 394 | referenced++; | 391 | referenced++; |
| 395 | break; | 392 | break; |
| 396 | } | 393 | } |
| 397 | referenced += page_referenced_one(page, vma, &mapcount, | 394 | referenced += page_referenced_one(page, vma, &mapcount); |
| 398 | ignore_token); | ||
| 399 | if (!mapcount) | 395 | if (!mapcount) |
| 400 | break; | 396 | break; |
| 401 | } | 397 | } |
| @@ -412,13 +408,10 @@ static int page_referenced_file(struct page *page, int ignore_token) | |||
| 412 | * Quick test_and_clear_referenced for all mappings to a page, | 408 | * Quick test_and_clear_referenced for all mappings to a page, |
| 413 | * returns the number of ptes which referenced the page. | 409 | * returns the number of ptes which referenced the page. |
| 414 | */ | 410 | */ |
| 415 | int page_referenced(struct page *page, int is_locked, int ignore_token) | 411 | int page_referenced(struct page *page, int is_locked) |
| 416 | { | 412 | { |
| 417 | int referenced = 0; | 413 | int referenced = 0; |
| 418 | 414 | ||
| 419 | if (!swap_token_default_timeout) | ||
| 420 | ignore_token = 1; | ||
| 421 | |||
| 422 | if (page_test_and_clear_young(page)) | 415 | if (page_test_and_clear_young(page)) |
| 423 | referenced++; | 416 | referenced++; |
| 424 | 417 | ||
| @@ -427,15 +420,14 @@ int page_referenced(struct page *page, int is_locked, int ignore_token) | |||
| 427 | 420 | ||
| 428 | if (page_mapped(page) && page->mapping) { | 421 | if (page_mapped(page) && page->mapping) { |
| 429 | if (PageAnon(page)) | 422 | if (PageAnon(page)) |
| 430 | referenced += page_referenced_anon(page, ignore_token); | 423 | referenced += page_referenced_anon(page); |
| 431 | else if (is_locked) | 424 | else if (is_locked) |
| 432 | referenced += page_referenced_file(page, ignore_token); | 425 | referenced += page_referenced_file(page); |
| 433 | else if (TestSetPageLocked(page)) | 426 | else if (TestSetPageLocked(page)) |
| 434 | referenced++; | 427 | referenced++; |
| 435 | else { | 428 | else { |
| 436 | if (page->mapping) | 429 | if (page->mapping) |
| 437 | referenced += page_referenced_file(page, | 430 | referenced += page_referenced_file(page); |
| 438 | ignore_token); | ||
| 439 | unlock_page(page); | 431 | unlock_page(page); |
| 440 | } | 432 | } |
| 441 | } | 433 | } |
| @@ -614,7 +606,6 @@ static void try_to_unmap_cluster(unsigned long cursor, | |||
| 614 | struct page *page; | 606 | struct page *page; |
| 615 | unsigned long address; | 607 | unsigned long address; |
| 616 | unsigned long end; | 608 | unsigned long end; |
| 617 | unsigned long pfn; | ||
| 618 | 609 | ||
| 619 | address = (vma->vm_start + cursor) & CLUSTER_MASK; | 610 | address = (vma->vm_start + cursor) & CLUSTER_MASK; |
| 620 | end = address + CLUSTER_SIZE; | 611 | end = address + CLUSTER_SIZE; |
| @@ -643,15 +634,8 @@ static void try_to_unmap_cluster(unsigned long cursor, | |||
| 643 | for (; address < end; pte++, address += PAGE_SIZE) { | 634 | for (; address < end; pte++, address += PAGE_SIZE) { |
| 644 | if (!pte_present(*pte)) | 635 | if (!pte_present(*pte)) |
| 645 | continue; | 636 | continue; |
| 646 | 637 | page = vm_normal_page(vma, address, *pte); | |
| 647 | pfn = pte_pfn(*pte); | 638 | BUG_ON(!page || PageAnon(page)); |
| 648 | if (unlikely(!pfn_valid(pfn))) { | ||
| 649 | print_bad_pte(vma, *pte, address); | ||
| 650 | continue; | ||
| 651 | } | ||
| 652 | |||
| 653 | page = pfn_to_page(pfn); | ||
| 654 | BUG_ON(PageAnon(page)); | ||
| 655 | 639 | ||
| 656 | if (ptep_clear_flush_young(vma, address, pte)) | 640 | if (ptep_clear_flush_young(vma, address, pte)) |
| 657 | continue; | 641 | continue; |
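Threading the swap-token change through: page_referenced() loses its ignore_token parameter because the caller now decides. As the vmscan.c hunks below show, scans at priority 0 call disable_swap_token() up front, so the per-call override (and the swap_token_default_timeout check formerly inside page_referenced) becomes redundant.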
diff --git a/mm/thrash.c b/mm/thrash.c index eff3c18c33a1..f4c560b4a2b7 100644 --- a/mm/thrash.c +++ b/mm/thrash.c | |||
| @@ -57,14 +57,17 @@ void grab_swap_token(void) | |||
| 57 | /* We have the token. Let others know we still need it. */ | 57 | /* We have the token. Let others know we still need it. */ |
| 58 | if (has_swap_token(current->mm)) { | 58 | if (has_swap_token(current->mm)) { |
| 59 | current->mm->recent_pagein = 1; | 59 | current->mm->recent_pagein = 1; |
| 60 | if (unlikely(!swap_token_default_timeout)) | ||
| 61 | disable_swap_token(); | ||
| 60 | return; | 62 | return; |
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | if (time_after(jiffies, swap_token_check)) { | 65 | if (time_after(jiffies, swap_token_check)) { |
| 64 | 66 | ||
| 65 | /* Can't get swapout protection if we exceed our RSS limit. */ | 67 | if (!swap_token_default_timeout) { |
| 66 | // if (current->mm->rss > current->mm->rlimit_rss) | 68 | swap_token_check = jiffies + SWAP_TOKEN_CHECK_INTERVAL; |
| 67 | // return; | 69 | return; |
| 70 | } | ||
| 68 | 71 | ||
| 69 | /* ... or if we recently held the token. */ | 72 | /* ... or if we recently held the token. */ |
| 70 | if (time_before(jiffies, current->mm->swap_token_time)) | 73 | if (time_before(jiffies, current->mm->swap_token_time)) |
| @@ -95,6 +98,7 @@ void __put_swap_token(struct mm_struct *mm) | |||
| 95 | { | 98 | { |
| 96 | spin_lock(&swap_token_lock); | 99 | spin_lock(&swap_token_lock); |
| 97 | if (likely(mm == swap_token_mm)) { | 100 | if (likely(mm == swap_token_mm)) { |
| 101 | mm->swap_token_time = jiffies + SWAP_TOKEN_CHECK_INTERVAL; | ||
| 98 | swap_token_mm = &init_mm; | 102 | swap_token_mm = &init_mm; |
| 99 | swap_token_check = jiffies; | 103 | swap_token_check = jiffies; |
| 100 | } | 104 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 28130541270f..b0cd81c32de6 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
| @@ -201,13 +201,25 @@ static int shrink_slab(unsigned long scanned, gfp_t gfp_mask, | |||
| 201 | list_for_each_entry(shrinker, &shrinker_list, list) { | 201 | list_for_each_entry(shrinker, &shrinker_list, list) { |
| 202 | unsigned long long delta; | 202 | unsigned long long delta; |
| 203 | unsigned long total_scan; | 203 | unsigned long total_scan; |
| 204 | unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask); | ||
| 204 | 205 | ||
| 205 | delta = (4 * scanned) / shrinker->seeks; | 206 | delta = (4 * scanned) / shrinker->seeks; |
| 206 | delta *= (*shrinker->shrinker)(0, gfp_mask); | 207 | delta *= max_pass; |
| 207 | do_div(delta, lru_pages + 1); | 208 | do_div(delta, lru_pages + 1); |
| 208 | shrinker->nr += delta; | 209 | shrinker->nr += delta; |
| 209 | if (shrinker->nr < 0) | 210 | if (shrinker->nr < 0) { |
| 210 | shrinker->nr = LONG_MAX; /* It wrapped! */ | 211 | printk(KERN_ERR "%s: nr=%ld\n", |
| 212 | __FUNCTION__, shrinker->nr); | ||
| 213 | shrinker->nr = max_pass; | ||
| 214 | } | ||
| 215 | |||
| 216 | /* | ||
| 217 | * Avoid risking looping forever due to too large nr value: | ||
| 218 | * never try to free more than twice the estimate number of | ||
| 219 | * freeable entries. | ||
| 220 | */ | ||
| 221 | if (shrinker->nr > max_pass * 2) | ||
| 222 | shrinker->nr = max_pass * 2; | ||
| 211 | 223 | ||
| 212 | total_scan = shrinker->nr; | 224 | total_scan = shrinker->nr; |
| 213 | shrinker->nr = 0; | 225 | shrinker->nr = 0; |
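With max_pass cached up front, the slab-scan budget is delta = (4 * scanned / seeks) * max_pass / (lru_pages + 1), and a negative or runaway shrinker->nr is now pinned near the shrinker's own estimate instead of being flipped to LONG_MAX. Round numbers make the scale obvious:

#include <stdio.h>

int main(void)
{
        unsigned long scanned = 1000, seeks = 2;
        unsigned long max_pass = 500;       /* shrinker's entry estimate */
        unsigned long lru_pages = 9999;

        unsigned long long delta = (4ULL * scanned) / seeks;    /* 2000 */
        delta = delta * max_pass / (lru_pages + 1);             /* 100 */

        unsigned long nr = (unsigned long)delta;
        if (nr > max_pass * 2)      /* the new cap from the hunk */
                nr = max_pass * 2;

        printf("scan %lu of ~%lu entries\n", nr, max_pass);     /* 100 of ~500 */
        return 0;
}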
| @@ -407,7 +419,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc) | |||
| 407 | if (PageWriteback(page)) | 419 | if (PageWriteback(page)) |
| 408 | goto keep_locked; | 420 | goto keep_locked; |
| 409 | 421 | ||
| 410 | referenced = page_referenced(page, 1, sc->priority <= 0); | 422 | referenced = page_referenced(page, 1); |
| 411 | /* In active use or really unfreeable? Activate it. */ | 423 | /* In active use or really unfreeable? Activate it. */ |
| 412 | if (referenced && page_mapping_inuse(page)) | 424 | if (referenced && page_mapping_inuse(page)) |
| 413 | goto activate_locked; | 425 | goto activate_locked; |
| @@ -756,7 +768,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc) | |||
| 756 | if (page_mapped(page)) { | 768 | if (page_mapped(page)) { |
| 757 | if (!reclaim_mapped || | 769 | if (!reclaim_mapped || |
| 758 | (total_swap_pages == 0 && PageAnon(page)) || | 770 | (total_swap_pages == 0 && PageAnon(page)) || |
| 759 | page_referenced(page, 0, sc->priority <= 0)) { | 771 | page_referenced(page, 0)) { |
| 760 | list_add(&page->lru, &l_active); | 772 | list_add(&page->lru, &l_active); |
| 761 | continue; | 773 | continue; |
| 762 | } | 774 | } |
| @@ -960,6 +972,8 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask) | |||
| 960 | sc.nr_reclaimed = 0; | 972 | sc.nr_reclaimed = 0; |
| 961 | sc.priority = priority; | 973 | sc.priority = priority; |
| 962 | sc.swap_cluster_max = SWAP_CLUSTER_MAX; | 974 | sc.swap_cluster_max = SWAP_CLUSTER_MAX; |
| 975 | if (!priority) | ||
| 976 | disable_swap_token(); | ||
| 963 | shrink_caches(zones, &sc); | 977 | shrink_caches(zones, &sc); |
| 964 | shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); | 978 | shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); |
| 965 | if (reclaim_state) { | 979 | if (reclaim_state) { |
| @@ -1056,6 +1070,10 @@ loop_again: | |||
| 1056 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ | 1070 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ |
| 1057 | unsigned long lru_pages = 0; | 1071 | unsigned long lru_pages = 0; |
| 1058 | 1072 | ||
| 1073 | /* The swap token gets in the way of swapout... */ | ||
| 1074 | if (!priority) | ||
| 1075 | disable_swap_token(); | ||
| 1076 | |||
| 1059 | all_zones_ok = 1; | 1077 | all_zones_ok = 1; |
| 1060 | 1078 | ||
| 1061 | if (nr_pages == 0) { | 1079 | if (nr_pages == 0) { |
| @@ -1360,6 +1378,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | |||
| 1360 | sc.nr_reclaimed = 0; | 1378 | sc.nr_reclaimed = 0; |
| 1361 | /* scan at the highest priority */ | 1379 | /* scan at the highest priority */ |
| 1362 | sc.priority = 0; | 1380 | sc.priority = 0; |
| 1381 | disable_swap_token(); | ||
| 1363 | 1382 | ||
| 1364 | if (nr_pages > SWAP_CLUSTER_MAX) | 1383 | if (nr_pages > SWAP_CLUSTER_MAX) |
| 1365 | sc.swap_cluster_max = nr_pages; | 1384 | sc.swap_cluster_max = nr_pages; |
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index 81e00a6c19de..e3b242daf53c 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c | |||
| @@ -39,23 +39,27 @@ static kmem_cache_t *rpc_inode_cachep __read_mostly; | |||
| 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) | 39 | #define RPC_UPCALL_TIMEOUT (30*HZ) |
| 40 | 40 | ||
| 41 | static void | 41 | static void |
| 42 | __rpc_purge_upcall(struct inode *inode, int err) | 42 | __rpc_purge_list(struct rpc_inode *rpci, struct list_head *head, int err) |
| 43 | { | 43 | { |
| 44 | struct rpc_inode *rpci = RPC_I(inode); | ||
| 45 | struct rpc_pipe_msg *msg; | 44 | struct rpc_pipe_msg *msg; |
| 45 | void (*destroy_msg)(struct rpc_pipe_msg *); | ||
| 46 | 46 | ||
| 47 | while (!list_empty(&rpci->pipe)) { | 47 | destroy_msg = rpci->ops->destroy_msg; |
| 48 | msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list); | 48 | while (!list_empty(head)) { |
| 49 | msg = list_entry(head->next, struct rpc_pipe_msg, list); | ||
| 49 | list_del_init(&msg->list); | 50 | list_del_init(&msg->list); |
| 50 | msg->errno = err; | 51 | msg->errno = err; |
| 51 | rpci->ops->destroy_msg(msg); | 52 | destroy_msg(msg); |
| 52 | } | ||
| 53 | while (!list_empty(&rpci->in_upcall)) { | ||
| 54 | msg = list_entry(rpci->pipe.next, struct rpc_pipe_msg, list); | ||
| 55 | list_del_init(&msg->list); | ||
| 56 | msg->errno = err; | ||
| 57 | rpci->ops->destroy_msg(msg); | ||
| 58 | } | 53 | } |
| 54 | } | ||
| 55 | |||
| 56 | static void | ||
| 57 | __rpc_purge_upcall(struct inode *inode, int err) | ||
| 58 | { | ||
| 59 | struct rpc_inode *rpci = RPC_I(inode); | ||
| 60 | |||
| 61 | __rpc_purge_list(rpci, &rpci->pipe, err); | ||
| 62 | __rpc_purge_list(rpci, &rpci->in_upcall, err); | ||
| 59 | rpci->pipelen = 0; | 63 | rpci->pipelen = 0; |
| 60 | wake_up(&rpci->waitq); | 64 | wake_up(&rpci->waitq); |
| 61 | } | 65 | } |
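The consolidation also quietly fixes a bug visible in the removed lines: the old in_upcall drain fetched entries from rpci->pipe.next while testing list_empty(&rpci->in_upcall), so it walked the wrong list whenever in_upcall was non-empty after pipe had drained. Routing both lists through __rpc_purge_list() removes the copy-pasted loop that made the slip possible.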
