66 files changed, 578 insertions, 353 deletions
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 31
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc9
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
index 39a3cd0a4173..f2c1600da097 100644
--- a/arch/ia64/kernel/dma-mapping.c
+++ b/arch/ia64/kernel/dma-mapping.c
@@ -10,7 +10,9 @@ EXPORT_SYMBOL(dma_ops);
 
 static int __init dma_init(void)
 {
 	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+
+	return 0;
 }
 fs_initcall(dma_init);
 
diff --git a/arch/ia64/lib/ip_fast_csum.S b/arch/ia64/lib/ip_fast_csum.S
index 1f86aeb2c948..620d9dc5220f 100644
--- a/arch/ia64/lib/ip_fast_csum.S
+++ b/arch/ia64/lib/ip_fast_csum.S
@@ -96,20 +96,22 @@ END(ip_fast_csum)
 GLOBAL_ENTRY(csum_ipv6_magic)
 	ld4	r20=[in0],4
 	ld4	r21=[in1],4
-	dep	r15=in3,in2,32,16
+	zxt4	in2=in2
 	;;
 	ld4	r22=[in0],4
 	ld4	r23=[in1],4
-	mux1	r15=r15,@rev
+	dep	r15=in3,in2,32,16
 	;;
 	ld4	r24=[in0],4
 	ld4	r25=[in1],4
-	shr.u	r15=r15,16
+	mux1	r15=r15,@rev
 	add	r16=r20,r21
 	add	r17=r22,r23
+	zxt4	in4=in4
 	;;
 	ld4	r26=[in0],4
 	ld4	r27=[in1],4
+	shr.u	r15=r15,16
 	add	r18=r24,r25
 	add	r8=r16,r17
 	;;
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
index 388cf57ad827..018d094d92f9 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -317,7 +317,7 @@ static int power7_generic_events[] = {
  */
 static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 	[C(L1D)] = {		/* RESULT_ACCESS	RESULT_MISS */
-		[C(OP_READ)] = {	0x400f0,	0xc880	},
+		[C(OP_READ)] = {	0xc880,		0x400f0	},
 		[C(OP_WRITE)] = {	0,		0x300f0	},
 		[C(OP_PREFETCH)] = {	0xd8b8,		0	},
 	},
@@ -327,8 +327,8 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
 		[C(OP_PREFETCH)] = {	0x408a,		0	},
 	},
 	[C(LL)] = {		/* RESULT_ACCESS	RESULT_MISS */
-		[C(OP_READ)] = {	0x6080,		0x6084	},
-		[C(OP_WRITE)] = {	0x6082,		0x6086	},
+		[C(OP_READ)] = {	0x16080,	0x26080	},
+		[C(OP_WRITE)] = {	0x16082,	0x26082	},
 		[C(OP_PREFETCH)] = {	0,		0	},
 	},
 	[C(DTLB)] = {		/* RESULT_ACCESS	RESULT_MISS */
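The two corrected rows above follow the layout the generic perf code relies on: a (cache, op, result) triple indexes straight into this three-dimensional table to obtain a raw event code. A minimal standalone sketch of that lookup, reusing the fixed L1D row; the enum names are illustrative stand-ins for the PERF_COUNT_HW_CACHE_* constants, and 0 is read here as "no event available":

	#include <stdio.h>

	enum { CACHE_L1D, CACHE_MAX };
	enum { OP_READ, OP_WRITE, OP_PREFETCH, OP_MAX };
	enum { RESULT_ACCESS, RESULT_MISS, RESULT_MAX };

	/* Same shape as power7_cache_events; values taken from the corrected hunk. */
	static const int cache_events[CACHE_MAX][OP_MAX][RESULT_MAX] = {
		[CACHE_L1D] = {
			[OP_READ]     = { 0xc880, 0x400f0 },	/* access, miss */
			[OP_WRITE]    = { 0,      0x300f0 },
			[OP_PREFETCH] = { 0xd8b8, 0       },
		},
	};

	int main(void)
	{
		int code = cache_events[CACHE_L1D][OP_READ][RESULT_MISS];

		printf("L1D read-miss raw event: %#x\n", code);	/* prints 0x400f0 */
		return 0;
	}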
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 3ee1fd37bbfc..40edad520770 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -234,7 +234,6 @@ static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc)
 	generic_handle_irq(cascade_irq);
 
 	/* Let xilinx_intc end the interrupt */
-	desc->chip->ack(irq);
 	desc->chip->unmask(irq);
 }
 
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index f0ee79055409..8daab33fc17d 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -886,7 +886,7 @@ void notrace init_irqwork_curcpu(void)
  * Therefore you cannot make any OBP calls, not even prom_printf,
  * from these two routines.
  */
-static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
+static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
 {
 	unsigned long num_entries = (qmask + 1) / 64;
 	unsigned long status;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 2c0cc72d295b..b75bf502cd42 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -103,7 +103,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
 	}
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+		if (local_read(&__get_cpu_var(alert_counter)) == 30 * nmi_hz)
 			die_nmi("BUG: NMI Watchdog detected LOCKUP",
 				regs, panic_on_timeout);
 	} else {
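Raising the threshold from 5 * nmi_hz to 30 * nmi_hz stretches the stall window before die_nmi() fires: the alert counter is bumped roughly once per profiling NMI, so N * nmi_hz ticks corresponds to about N seconds of apparent lockup. A toy calculation under that assumption (the nmi_hz value is illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned int nmi_hz = 100;	/* illustrative NMI tick rate */

		/* counter threshold -> approximate seconds of apparent lockup */
		printf("old: %u ticks (~5 s), new: %u ticks (~30 s)\n",
		       5 * nmi_hz, 30 * nmi_hz);
		return 0;
	}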
diff --git a/arch/sparc/prom/misc_64.c b/arch/sparc/prom/misc_64.c
index eedffb4fec2d..39fc6af21b7c 100644
--- a/arch/sparc/prom/misc_64.c
+++ b/arch/sparc/prom/misc_64.c
@@ -88,7 +88,7 @@ void prom_cmdline(void)
 /* Drop into the prom, but completely terminate the program.
  * No chance of continuing.
  */
-void prom_halt(void)
+void notrace prom_halt(void)
 {
 #ifdef CONFIG_SUN_LDOMS
 	if (ldom_domaining_enabled)
diff --git a/arch/sparc/prom/printf.c b/arch/sparc/prom/printf.c
index 660943ee4c2a..ca869266b9f3 100644
--- a/arch/sparc/prom/printf.c
+++ b/arch/sparc/prom/printf.c
@@ -14,14 +14,14 @@
  */
 
 #include <linux/kernel.h>
+#include <linux/compiler.h>
 
 #include <asm/openprom.h>
 #include <asm/oplib.h>
 
 static char ppbuf[1024];
 
-void
-prom_write(const char *buf, unsigned int n)
+void notrace prom_write(const char *buf, unsigned int n)
 {
 	char ch;
 
@@ -33,8 +33,7 @@ prom_write(const char *buf, unsigned int n)
 	}
 }
 
-void
-prom_printf(const char *fmt, ...)
+void notrace prom_printf(const char *fmt, ...)
 {
 	va_list args;
 	int i;
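The new <linux/compiler.h> include apparently exists only to pull in the notrace annotation used on prom_write() and prom_printf(); together with the prom_halt() and register_one_mondo() hunks above it keeps function-tracer hooks out of code paths that must not recurse into the tracer. A compilable sketch of what such an annotation amounts to, assuming the usual no_instrument_function definition rather than quoting the kernel header:

	/* Assumed equivalent of the kernel's notrace marker. */
	#define notrace __attribute__((no_instrument_function))

	/* Low-level output routine that must never be instrumented, because
	 * the instrumentation itself might end up calling back into it. */
	static void notrace raw_console_write(const char *buf, unsigned int n)
	{
		(void)buf;
		(void)n;
	}

	int main(void)
	{
		raw_console_write("ok\n", 3);
		return 0;
	}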
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 418d63619680..d3aa2aadb3e0 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -133,7 +133,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 		return -EINVAL;
 
 	spin_lock_irq(q->queue_lock);
-	blk_queue_max_sectors(q, max_sectors_kb << 1);
+	q->limits.max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
diff --git a/crypto/algapi.c b/crypto/algapi.c
index 56c62e2858d5..df0863d56995 100644
--- a/crypto/algapi.c
+++ b/crypto/algapi.c
@@ -692,7 +692,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(crypto_enqueue_request);
 
-struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset)
 {
 	struct list_head *request;
 
@@ -707,7 +707,14 @@ struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
 	request = queue->list.next;
 	list_del(request);
 
-	return list_entry(request, struct crypto_async_request, list);
+	return (char *)list_entry(request, struct crypto_async_request, list) -
+	       offset;
+}
+EXPORT_SYMBOL_GPL(__crypto_dequeue_request);
+
+struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue)
+{
+	return __crypto_dequeue_request(queue, 0);
 }
 EXPORT_SYMBOL_GPL(crypto_dequeue_request);
 
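__crypto_dequeue_request() lets a caller that embeds struct crypto_async_request inside a larger request recover the enclosing object in one step: it subtracts a caller-supplied byte offset from the list_entry() result. A userspace sketch of the same pointer arithmetic with offsetof(); the structure names here are hypothetical, not part of the crypto API:

	#include <stddef.h>
	#include <stdio.h>

	struct base_request {		/* stands in for crypto_async_request */
		int id;
	};

	struct wrapped_request {	/* hypothetical type embedding the base */
		int priority;
		struct base_request base;
	};

	/* Subtracting the embedding offset from the base pointer yields the
	 * wrapper, which is what a non-zero offset to the dequeue helper buys. */
	static void *recover_wrapper(struct base_request *base, size_t offset)
	{
		return (char *)base - offset;
	}

	int main(void)
	{
		struct wrapped_request w = { .priority = 7, .base = { .id = 42 } };
		struct wrapped_request *back =
			recover_wrapper(&w.base, offsetof(struct wrapped_request, base));

		printf("priority=%d id=%d\n", back->priority, back->base.id);
		return 0;
	}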
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index 973be2f44195..4e28b35024ec 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -300,8 +300,7 @@ static int do_output_char(unsigned char c, struct tty_struct *tty, int space)
 			if (space < 2)
 				return -1;
 			tty->canon_column = tty->column = 0;
-			tty_put_char(tty, '\r');
-			tty_put_char(tty, c);
+			tty->ops->write(tty, "\r\n", 2);
 			return 2;
 		}
 		tty->canon_column = tty->column;
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index d083c73d784a..b33d6688e910 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -109,21 +109,13 @@ static int pty_space(struct tty_struct *to)
  *	the other side of the pty/tty pair.
  */
 
-static int pty_write(struct tty_struct *tty, const unsigned char *buf,
-		     int count)
+static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
 {
 	struct tty_struct *to = tty->link;
-	int c;
 
 	if (tty->stopped)
 		return 0;
 
-	/* This isn't locked but our 8K is quite sloppy so no
-	   big deal */
-
-	c = pty_space(to);
-	if (c > count)
-		c = count;
 	if (c > 0) {
 		/* Stuff the data into the input queue of the other end */
 		c = tty_insert_flip_string(to, buf, c);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fd69086d08d5..2968ed6a9c49 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
-	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("suspending cpu %u\n", cpu);
 
-	/*
-	 * This whole bogosity is here because Powerbooks are made of fail.
-	 * No sane platform should need any of the code below to be run.
-	 * (it's entirely the wrong thing to do, as driver->get may
-	 * reenable interrupts on some architectures).
-	 */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
 	if (cpufreq_driver->suspend) {
 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
-		if (ret) {
+		if (ret)
 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
 				"step on CPU %u\n", cpu_policy->cpu);
-			goto out;
-		}
-	}
-
-	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
-		goto out;
-
-	if (cpufreq_driver->get)
-		cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-	if (!cur_freq || !cpu_policy->cur) {
-		printk(KERN_ERR "cpufreq: suspend failed to assert current "
-		       "frequency is what timing core thinks it is.\n");
-		goto out;
-	}
-
-	if (unlikely(cur_freq != cpu_policy->cur)) {
-		struct cpufreq_freqs freqs;
-
-		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-			dprintk("Warning: CPU frequency is %u, "
-				"cpufreq assumed %u kHz.\n",
-				cur_freq, cpu_policy->cur);
-
-		freqs.cpu = cpu;
-		freqs.old = cpu_policy->cur;
-		freqs.new = cur_freq;
-
-		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-					 CPUFREQ_SUSPENDCHANGE, &freqs);
-		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
-		cpu_policy->cur = cur_freq;
 	}
 
 out:
 	cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
 	return ret;
 }
 
@@ -1330,24 +1287,21 @@ out:
  *	cpufreq_resume - restore proper CPU frequency handling after resume
  *
  *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- *	3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored.
+ *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ *	    restored. It will verify that the current freq is in sync with
+ *	    what we believe it to be. This is a bit later than when it
+ *	    should be, but nonethteless it's better than calling
+ *	    cpufreq_driver->get() here which might re-enable interrupts...
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
 
-	/* As with the ->suspend method, all the code below is
-	 * only necessary because Powerbooks suck.
-	 * See commit 42d4dc3f4e1e for jokes. */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
 		}
 	}
 
-	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-		unsigned int cur_freq = 0;
-
-		if (cpufreq_driver->get)
-			cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-		if (!cur_freq || !cpu_policy->cur) {
-			printk(KERN_ERR "cpufreq: resume failed to assert "
-					"current frequency is what timing core "
-					"thinks it is.\n");
-			goto out;
-		}
-
-		if (unlikely(cur_freq != cpu_policy->cur)) {
-			struct cpufreq_freqs freqs;
-
-			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-				dprintk("Warning: CPU frequency "
-					"is %u, cpufreq assumed %u kHz.\n",
-					cur_freq, cpu_policy->cur);
-
-			freqs.cpu = cpu;
-			freqs.old = cpu_policy->cur;
-			freqs.new = cur_freq;
-
-			srcu_notifier_call_chain(
-					&cpufreq_transition_notifier_list,
-					CPUFREQ_RESUMECHANGE, &freqs);
-			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
-			cpu_policy->cur = cur_freq;
-		}
-	}
-
-out:
 	schedule_work(&cpu_policy->update);
+
 fail:
 	cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
 	return ret;
 }
 
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 110e731f5574..1c0b504a42f3 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -196,7 +196,7 @@ static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
 	switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 			irm_id, generation, SCODE_100,
 			CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
-			data, sizeof(data))) {
+			data, 8)) {
 	case RCODE_GENERATION:
 		/* A generation change frees all bandwidth. */
 		return allocate ? -EAGAIN : bandwidth;
@@ -233,7 +233,7 @@ static int manage_channel(struct fw_card *card, int irm_id, int generation,
 		data[1] = old ^ c;
 		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
 				irm_id, generation, SCODE_100,
-				offset, data, sizeof(data))) {
+				offset, data, 8)) {
 		case RCODE_GENERATION:
 			/* A generation change frees all channels. */
 			return allocate ? -EAGAIN : i;
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index ecddd11b797a..76b321bb73f9 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
+#include <linux/pci_ids.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 
@@ -2372,6 +2373,9 @@ static void ohci_pmac_off(struct pci_dev *dev)
 #define ohci_pmac_off(dev)
 #endif /* CONFIG_PPC_PMAC */
 
+#define PCI_VENDOR_ID_AGERE		PCI_VENDOR_ID_ATT
+#define PCI_DEVICE_ID_AGERE_FW643	0x5901
+
 static int __devinit pci_probe(struct pci_dev *dev,
 			       const struct pci_device_id *ent)
 {
@@ -2422,6 +2426,16 @@ static int __devinit pci_probe(struct pci_dev *dev,
 	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
 	ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
 
+	/* dual-buffer mode is broken if more than one IR context is active */
+	if (dev->vendor == PCI_VENDOR_ID_AGERE &&
+	    dev->device == PCI_DEVICE_ID_AGERE_FW643)
+		ohci->use_dualbuffer = false;
+
+	/* dual-buffer mode is broken */
+	if (dev->vendor == PCI_VENDOR_ID_RICOH &&
+	    dev->device == PCI_DEVICE_ID_RICOH_R5C832)
+		ohci->use_dualbuffer = false;
+
 	/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
 #if !defined(CONFIG_X86_32)
 	/* dual-buffer mode is broken with descriptor addresses above 2G */
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 8d51568ee143..e5df822a8130 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -456,12 +456,12 @@ static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
 	}
 	spin_unlock_irqrestore(&card->lock, flags);
 
-	if (&orb->link != &lu->orb_list)
+	if (&orb->link != &lu->orb_list) {
 		orb->callback(orb, &status);
-	else
+		kref_put(&orb->kref, free_orb);
+	} else {
 		fw_error("status write for unknown orb\n");
-
-	kref_put(&orb->kref, free_orb);
+	}
 
 	fw_send_response(card, request, RCODE_COMPLETE);
 }
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 527908ff298c..063b933d864a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -408,6 +408,7 @@ static struct pcmcia_device_id ide_ids[] = {
 	PCMCIA_DEVICE_PROD_ID123("PCMCIA", "IDE CARD", "F1", 0x281f1c5d, 0x1907960c, 0xf7fde8b9),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "CD-ROM", 0x78f308dc, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("ARGOSY", "PnPIDE", 0x78f308dc, 0x0c694728),
+	PCMCIA_DEVICE_PROD_ID12("CNF ", "CD-ROM", 0x46d7db81, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("CNF CD-M", "CD-ROM", 0x7d93b852, 0x66536591),
 	PCMCIA_DEVICE_PROD_ID12("Creative Technology Ltd.", "PCMCIA CD-ROM Interface Card", 0xff8c8a45, 0xfe8020c4),
 	PCMCIA_DEVICE_PROD_ID12("Digital Equipment Corporation.", "Digital Mobile Media CD-ROM", 0x17692a66, 0xef1dcbde),
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 95fe0452dae4..6c6a09b1c0fe 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -880,6 +880,14 @@ static unsigned int atkbd_hp_zv6100_forced_release_keys[] = {
 };
 
 /*
+ * Perform fixup for HP (Compaq) Presario R4000 R4100 R4200 that don't generate
+ * release for their volume buttons
+ */
+static unsigned int atkbd_hp_r4000_forced_release_keys[] = {
+	0xae, 0xb0, -1U
+};
+
+/*
  * Samsung NC10,NC20 with Fn+F? key release not working
  */
 static unsigned int atkbd_samsung_forced_release_keys[] = {
@@ -1537,6 +1545,33 @@ static struct dmi_system_id atkbd_dmi_quirk_table[] __initdata = {
 		.driver_data = atkbd_hp_zv6100_forced_release_keys,
 	},
 	{
+		.ident = "HP Presario R4000",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4000"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
+	{
+		.ident = "HP Presario R4100",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4100"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
+	{
+		.ident = "HP Presario R4200",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Presario R4200"),
+		},
+		.callback = atkbd_setup_forced_release,
+		.driver_data = atkbd_hp_r4000_forced_release_keys,
+	},
+	{
 		.ident = "Inventec Symphony",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "INVENTEC"),
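Each DMI entry added above simply points driver_data at a forced-release key list terminated by -1U; the driver then knows which scancodes need a synthetic release event on these laptops. A standalone sketch of walking such a list, reusing the new R4000 data; the walker itself is hypothetical, not the atkbd code:

	#include <stdio.h>

	static const unsigned int hp_r4000_forced_release_keys[] = {
		0xae, 0xb0, -1U		/* volume keys; -1U terminates the list */
	};

	int main(void)
	{
		const unsigned int *key;

		for (key = hp_r4000_forced_release_keys; *key != -1U; key++)
			printf("force release after scancode 0x%x\n", *key);
		return 0;
	}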
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ae04d8a494e5..ccbf23ece8e3 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -382,6 +382,14 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
 		},
 	},
+	{
+		.ident = "Acer Aspire 5536",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
+			DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+		},
+	},
 	{ }
 };
 
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 3710ff88fc10..556acff3952f 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -171,6 +171,14 @@ static int set_chunk_size(struct dm_exception_store *store,
 	 */
 	chunk_size_ulong = round_up(chunk_size_ulong, PAGE_SIZE >> 9);
 
+	return dm_exception_store_set_chunk_size(store, chunk_size_ulong,
+						 error);
+}
+
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error)
+{
 	/* Check chunk_size is a power of 2 */
 	if (!is_power_of_2(chunk_size_ulong)) {
 		*error = "Chunk size is not a power of 2";
@@ -183,6 +191,11 @@ static int set_chunk_size(struct dm_exception_store *store,
 		return -EINVAL;
 	}
 
+	if (chunk_size_ulong > INT_MAX >> SECTOR_SHIFT) {
+		*error = "Chunk size is too high";
+		return -EINVAL;
+	}
+
 	store->chunk_size = chunk_size_ulong;
 	store->chunk_mask = chunk_size_ulong - 1;
 	store->chunk_shift = ffs(chunk_size_ulong) - 1;
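The new upper bound presumably exists because the chunk size is kept in 512-byte sectors and later shifted by SECTOR_SHIFT into a byte count (see the memset()/chunk_io() users in the dm-snap-persistent.c hunks further down); anything above INT_MAX >> SECTOR_SHIFT would no longer fit in an int once converted. A small demonstration of that boundary, assuming SECTOR_SHIFT is 9:

	#include <limits.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9	/* 512-byte sectors */

	int main(void)
	{
		unsigned long max_ok   = (unsigned long)INT_MAX >> SECTOR_SHIFT;
		unsigned long too_high = max_ok + 1;

		printf("largest accepted chunk: %lu sectors = %lu bytes\n",
		       max_ok, max_ok << SECTOR_SHIFT);
		printf("rejected: %lu sectors -> %lu bytes (> INT_MAX %d)\n",
		       too_high, too_high << SECTOR_SHIFT, INT_MAX);
		return 0;
	}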
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 2442c8c07898..812c71872ba0 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,6 +168,10 @@ static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
 int dm_exception_store_type_register(struct dm_exception_store_type *type);
 int dm_exception_store_type_unregister(struct dm_exception_store_type *type);
 
+int dm_exception_store_set_chunk_size(struct dm_exception_store *store,
+				      unsigned long chunk_size_ulong,
+				      char **error);
+
 int dm_exception_store_create(struct dm_target *ti, int argc, char **argv,
 			      unsigned *args_used,
 			      struct dm_exception_store **store);
diff --git a/drivers/md/dm-log-userspace-base.c b/drivers/md/dm-log-userspace-base.c
index e69b96560997..6e186b1a062d 100644
--- a/drivers/md/dm-log-userspace-base.c
+++ b/drivers/md/dm-log-userspace-base.c
@@ -21,6 +21,7 @@ struct log_c {
 	struct dm_target *ti;
 	uint32_t region_size;
 	region_t region_count;
+	uint64_t luid;
 	char uuid[DM_UUID_LEN];
 
 	char *usr_argv_str;
@@ -63,7 +64,7 @@ static int userspace_do_request(struct log_c *lc, const char *uuid,
 	 * restored.
 	 */
 retry:
-	r = dm_consult_userspace(uuid, request_type, data,
+	r = dm_consult_userspace(uuid, lc->luid, request_type, data,
 				 data_size, rdata, rdata_size);
 
 	if (r != -ESRCH)
@@ -74,14 +75,15 @@ retry:
 			set_current_state(TASK_INTERRUPTIBLE);
 			schedule_timeout(2*HZ);
 			DMWARN("Attempting to contact userspace log server...");
-			r = dm_consult_userspace(uuid, DM_ULOG_CTR, lc->usr_argv_str,
+			r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_CTR,
+						 lc->usr_argv_str,
 						 strlen(lc->usr_argv_str) + 1,
 						 NULL, NULL);
 			if (!r)
 				break;
 		}
 		DMINFO("Reconnected to userspace log server... DM_ULOG_CTR complete");
-		r = dm_consult_userspace(uuid, DM_ULOG_RESUME, NULL,
+		r = dm_consult_userspace(uuid, lc->luid, DM_ULOG_RESUME, NULL,
 					 0, NULL, NULL);
 		if (!r)
 			goto retry;
@@ -111,10 +113,9 @@ static int build_constructor_string(struct dm_target *ti,
 		return -ENOMEM;
 	}
 
-	for (i = 0, str_size = 0; i < argc; i++)
-		str_size += sprintf(str + str_size, "%s ", argv[i]);
-	str_size += sprintf(str + str_size, "%llu",
-			    (unsigned long long)ti->len);
+	str_size = sprintf(str, "%llu", (unsigned long long)ti->len);
+	for (i = 0; i < argc; i++)
+		str_size += sprintf(str + str_size, " %s", argv[i]);
 
 	*ctr_str = str;
 	return str_size;
@@ -154,6 +155,9 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 		return -ENOMEM;
 	}
 
+	/* The ptr value is sufficient for local unique id */
+	lc->luid = (uint64_t)lc;
+
 	lc->ti = ti;
 
 	if (strlen(argv[0]) > (DM_UUID_LEN - 1)) {
@@ -173,7 +177,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 	}
 
 	/* Send table string */
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_CTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_CTR,
 				 ctr_str, str_size, NULL, NULL);
 
 	if (r == -ESRCH) {
@@ -183,7 +187,7 @@ static int userspace_ctr(struct dm_dirty_log *log, struct dm_target *ti,
 
 	/* Since the region size does not change, get it now */
 	rdata_size = sizeof(rdata);
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_GET_REGION_SIZE,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_GET_REGION_SIZE,
 				 NULL, 0, (char *)&rdata, &rdata_size);
 
 	if (r) {
@@ -212,7 +216,7 @@ static void userspace_dtr(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_DTR,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_DTR,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -227,7 +231,7 @@ static int userspace_presuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_PRESUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_PRESUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -239,7 +243,7 @@ static int userspace_postsuspend(struct dm_dirty_log *log)
 	int r;
 	struct log_c *lc = log->context;
 
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_POSTSUSPEND,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_POSTSUSPEND,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -252,7 +256,7 @@ static int userspace_resume(struct dm_dirty_log *log)
 	struct log_c *lc = log->context;
 
 	lc->in_sync_hint = 0;
-	r = dm_consult_userspace(lc->uuid, DM_ULOG_RESUME,
+	r = dm_consult_userspace(lc->uuid, lc->luid, DM_ULOG_RESUME,
 				 NULL, 0,
 				 NULL, NULL);
 
@@ -561,6 +565,7 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 			    char *result, unsigned maxlen)
 {
 	int r = 0;
+	char *table_args;
 	size_t sz = (size_t)maxlen;
 	struct log_c *lc = log->context;
 
@@ -577,8 +582,12 @@ static int userspace_status(struct dm_dirty_log *log, status_type_t status_type,
 		break;
 	case STATUSTYPE_TABLE:
 		sz = 0;
-		DMEMIT("%s %u %s %s", log->type->name, lc->usr_argc + 1,
-		       lc->uuid, lc->usr_argv_str);
+		table_args = strstr(lc->usr_argv_str, " ");
+		BUG_ON(!table_args); /* There will always be a ' ' */
+		table_args++;
+
+		DMEMIT("%s %u %s %s ", log->type->name, lc->usr_argc,
+		       lc->uuid, table_args);
 		break;
 	}
 	return (r) ? 0 : (int)sz;
diff --git a/drivers/md/dm-log-userspace-transfer.c b/drivers/md/dm-log-userspace-transfer.c
index 8ce74d95ae4d..ba0edad2d048 100644
--- a/drivers/md/dm-log-userspace-transfer.c
+++ b/drivers/md/dm-log-userspace-transfer.c
@@ -147,7 +147,8 @@ static void cn_ulog_callback(void *data)
 
 /**
  * dm_consult_userspace
- * @uuid: log's uuid (must be DM_UUID_LEN in size)
+ * @uuid: log's universal unique identifier (must be DM_UUID_LEN in size)
+ * @luid: log's local unique identifier
  * @request_type: found in include/linux/dm-log-userspace.h
  * @data: data to tx to the server
  * @data_size: size of data in bytes
@@ -163,7 +164,7 @@ static void cn_ulog_callback(void *data)
 *
 * Returns: 0 on success, -EXXX on failure
 **/
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size)
 {
@@ -190,6 +191,7 @@ resend:
 
 	memset(tfr, 0, DM_ULOG_PREALLOCED_SIZE - overhead_size);
 	memcpy(tfr->uuid, uuid, DM_UUID_LEN);
+	tfr->luid = luid;
 	tfr->seq = dm_ulog_seq++;
 
 	/*
diff --git a/drivers/md/dm-log-userspace-transfer.h b/drivers/md/dm-log-userspace-transfer.h
index c26d8e4e2710..04ee874f9153 100644
--- a/drivers/md/dm-log-userspace-transfer.h
+++ b/drivers/md/dm-log-userspace-transfer.h
@@ -11,7 +11,7 @@
 
 int dm_ulog_tfr_init(void);
 void dm_ulog_tfr_exit(void);
-int dm_consult_userspace(const char *uuid, int request_type,
+int dm_consult_userspace(const char *uuid, uint64_t luid, int request_type,
 			 char *data, size_t data_size,
 			 char *rdata, size_t *rdata_size);
 
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9726577cde49..33f179e66bf5 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -648,7 +648,13 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
 	 */
 	dm_rh_inc_pending(ms->rh, &sync);
 	dm_rh_inc_pending(ms->rh, &nosync);
-	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : 0;
+
+	/*
+	 * If the flush fails on a previous call and succeeds here,
+	 * we must not reset the log_failure variable. We need
+	 * userspace interaction to do that.
+	 */
+	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;
 
 	/*
 	 * Dispatch io.
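The one-line change above turns log_failure into a latch: once a flush has failed, a later successful flush must not clear the flag, since (per the new comment) only user intervention is supposed to do that. The pattern in isolation:

	#include <stdio.h>

	static int log_failure;	/* sticks at 1 once set */

	static void record_flush(int err)
	{
		/* latch the error; never auto-reset on later success */
		log_failure = err ? 1 : log_failure;
	}

	int main(void)
	{
		record_flush(1);
		record_flush(0);
		printf("log_failure=%d\n", log_failure);	/* still 1 */
		return 0;
	}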
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 6e3fe4f14934..d5b2e08750d5 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
 	void *zero_area;
 
 	/*
+	 * An area used for header. The header can be written
+	 * concurrently with metadata (when invalidating the snapshot),
+	 * so it needs a separate buffer.
+	 */
+	void *header_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
 	 */
 	ps->area = vmalloc(len);
 	if (!ps->area)
-		return r;
+		goto err_area;
 
 	ps->zero_area = vmalloc(len);
-	if (!ps->zero_area) {
-		vfree(ps->area);
-		return r;
-	}
+	if (!ps->zero_area)
+		goto err_zero_area;
 	memset(ps->zero_area, 0, len);
 
+	ps->header_area = vmalloc(len);
+	if (!ps->header_area)
+		goto err_header_area;
+
 	return 0;
+
+err_header_area:
+	vfree(ps->zero_area);
+
+err_zero_area:
+	vfree(ps->area);
+
+err_area:
+	return r;
 }
 
 static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
 	if (ps->zero_area)
 		vfree(ps->zero_area);
 	ps->zero_area = NULL;
+
+	if (ps->header_area)
+		vfree(ps->header_area);
+	ps->header_area = NULL;
 }
 
 struct mdata_req {
@@ -188,7 +210,8 @@ static void do_metadata(struct work_struct *work)
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
+static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw,
+		    int metadata)
 {
 	struct dm_io_region where = {
 		.bdev = ps->store->cow->bdev,
@@ -198,7 +221,7 @@ static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
 	struct dm_io_request io_req = {
 		.bi_rw = rw,
 		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->area,
+		.mem.ptr.vma = area,
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
@@ -240,7 +263,7 @@ static int area_io(struct pstore *ps, int rw)
 
 	chunk = area_location(ps, ps->current_area);
 
-	r = chunk_io(ps, chunk, rw, 0);
+	r = chunk_io(ps, ps->area, chunk, rw, 0);
 	if (r)
 		return r;
 
@@ -254,20 +277,7 @@ static void zero_memory_area(struct pstore *ps)
 
 static int zero_disk_area(struct pstore *ps, chunk_t area)
 {
-	struct dm_io_region where = {
-		.bdev = ps->store->cow->bdev,
-		.sector = ps->store->chunk_size * area_location(ps, area),
-		.count = ps->store->chunk_size,
-	};
-	struct dm_io_request io_req = {
-		.bi_rw = WRITE,
-		.mem.type = DM_IO_VMA,
-		.mem.ptr.vma = ps->zero_area,
-		.client = ps->io_client,
-		.notify.fn = NULL,
-	};
-
-	return dm_io(&io_req, 1, &where, NULL);
+	return chunk_io(ps, ps->zero_area, area_location(ps, area), WRITE, 0);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -276,6 +286,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	struct disk_header *dh;
 	chunk_t chunk_size;
 	int chunk_size_supplied = 1;
+	char *chunk_err;
 
 	/*
 	 * Use default chunk size (or hardsect_size, if larger) if none supplied
@@ -297,11 +308,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;
 
-	r = chunk_io(ps, 0, READ, 1);
+	r = chunk_io(ps, ps->header_area, 0, READ, 1);
 	if (r)
 		goto bad;
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 
 	if (le32_to_cpu(dh->magic) == 0) {
 		*new_snapshot = 1;
@@ -319,20 +330,25 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	ps->version = le32_to_cpu(dh->version);
 	chunk_size = le32_to_cpu(dh->chunk_size);
 
-	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
+	if (ps->store->chunk_size == chunk_size)
 		return 0;
 
-	DMWARN("chunk size %llu in device metadata overrides "
-	       "table chunk size of %llu.",
-	       (unsigned long long)chunk_size,
-	       (unsigned long long)ps->store->chunk_size);
+	if (chunk_size_supplied)
+		DMWARN("chunk size %llu in device metadata overrides "
+		       "table chunk size of %llu.",
+		       (unsigned long long)chunk_size,
+		       (unsigned long long)ps->store->chunk_size);
 
 	/* We had a bogus chunk_size. Fix stuff up. */
 	free_area(ps);
 
-	ps->store->chunk_size = chunk_size;
-	ps->store->chunk_mask = chunk_size - 1;
-	ps->store->chunk_shift = ffs(chunk_size) - 1;
+	r = dm_exception_store_set_chunk_size(ps->store, chunk_size,
+					      &chunk_err);
+	if (r) {
+		DMERR("invalid on-disk chunk size %llu: %s.",
+		      (unsigned long long)chunk_size, chunk_err);
+		return r;
+	}
 
 	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
 				ps->io_client);
@@ -351,15 +367,15 @@ static int write_header(struct pstore *ps)
 {
 	struct disk_header *dh;
 
-	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 	dh->magic = cpu_to_le32(SNAP_MAGIC);
 	dh->valid = cpu_to_le32(ps->valid);
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-	return chunk_io(ps, 0, WRITE, 1);
+	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
 
 /*
@@ -679,6 +695,8 @@ static int persistent_ctr(struct dm_exception_store *store,
 	ps->valid = 1;
 	ps->version = SNAPSHOT_DISK_VERSION;
 	ps->area = NULL;
+	ps->zero_area = NULL;
+	ps->header_area = NULL;
 	ps->next_free = 2; /* skipping the header and first area */
 	ps->current_committed = 0;
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index d573165cd2b7..57f1bf7f3b7a 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1176,6 +1176,15 @@ static int snapshot_status(struct dm_target *ti, status_type_t type,
 	return 0;
 }
 
+static int snapshot_iterate_devices(struct dm_target *ti,
+				    iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_snapshot *snap = ti->private;
+
+	return fn(ti, snap->origin, 0, ti->len, data);
+}
+
+
 /*-----------------------------------------------------------------
  * Origin methods
  *---------------------------------------------------------------*/
@@ -1410,20 +1419,29 @@ static int origin_status(struct dm_target *ti, status_type_t type, char *result,
 	return 0;
 }
 
+static int origin_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct dm_dev *dev = ti->private;
+
+	return fn(ti, dev, 0, ti->len, data);
+}
+
 static struct target_type origin_target = {
 	.name    = "snapshot-origin",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = origin_ctr,
 	.dtr     = origin_dtr,
 	.map     = origin_map,
 	.resume  = origin_resume,
 	.status  = origin_status,
+	.iterate_devices = origin_iterate_devices,
 };
 
 static struct target_type snapshot_target = {
 	.name    = "snapshot",
-	.version = {1, 6, 0},
+	.version = {1, 7, 0},
 	.module  = THIS_MODULE,
 	.ctr     = snapshot_ctr,
 	.dtr     = snapshot_dtr,
@@ -1431,6 +1449,7 @@ static struct target_type snapshot_target = {
 	.end_io  = snapshot_end_io,
 	.resume  = snapshot_resume,
 	.status  = snapshot_status,
+	.iterate_devices = snapshot_iterate_devices,
 };
 
 static int __init dm_snapshot_init(void)
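Both new iterate_devices methods do the same thing: hand the single underlying device, from offset 0 for the target's whole length, to whatever callout the core passes in; the dm-table.c hunks at the end of this diff are such callers (device_area_is_invalid, dm_set_device_limits). A userspace sketch of the callout shape, with hypothetical names:

	#include <stdio.h>

	struct dev {
		const char *name;
		unsigned long long size;	/* in sectors */
	};

	typedef int (*callout_fn)(struct dev *d, unsigned long long start,
				  unsigned long long len, void *data);

	/* "Target" that sits on exactly one device, like snapshot-origin. */
	static int single_dev_iterate(struct dev *d, unsigned long long target_len,
				      callout_fn fn, void *data)
	{
		return fn(d, 0, target_len, data);
	}

	/* Callout in the style of device_area_is_invalid: non-zero means reject. */
	static int area_is_invalid(struct dev *d, unsigned long long start,
				   unsigned long long len, void *data)
	{
		(void)data;
		return start + len > d->size;
	}

	int main(void)
	{
		struct dev cow = { "cow", 2048 };

		printf("fits: invalid=%d\n",
		       single_dev_iterate(&cow, 2048, area_is_invalid, NULL));
		printf("too big: invalid=%d\n",
		       single_dev_iterate(&cow, 4096, area_is_invalid, NULL));
		return 0;
	}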
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 4e0e5937e42a..3e563d251733 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -329,9 +329,19 @@ static int stripe_iterate_devices(struct dm_target *ti,
 	return ret;
 }
 
+static void stripe_io_hints(struct dm_target *ti,
+			    struct queue_limits *limits)
+{
+	struct stripe_c *sc = ti->private;
+	unsigned chunk_size = (sc->chunk_mask + 1) << 9;
+
+	blk_limits_io_min(limits, chunk_size);
+	limits->io_opt = chunk_size * sc->stripes;
+}
+
 static struct target_type stripe_target = {
 	.name   = "striped",
-	.version = {1, 2, 0},
+	.version = {1, 3, 0},
 	.module = THIS_MODULE,
 	.ctr    = stripe_ctr,
 	.dtr    = stripe_dtr,
@@ -339,6 +349,7 @@ static struct target_type stripe_target = {
 	.end_io = stripe_end_io,
 	.status = stripe_status,
 	.iterate_devices = stripe_iterate_devices,
+	.io_hints = stripe_io_hints,
 };
 
 int __init dm_stripe_init(void)
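stripe_io_hints() above advertises one chunk as the minimum I/O size and one full stripe (chunk size times stripe count) as the optimal I/O size. The arithmetic in isolation, with illustrative numbers; chunk_mask + 1 is the chunk size in 512-byte sectors:

	#include <stdio.h>

	int main(void)
	{
		unsigned chunk_mask = 127;			/* 128 sectors = 64 KiB */
		unsigned stripes = 4;
		unsigned chunk_size = (chunk_mask + 1) << 9;	/* bytes */

		printf("io_min = %u bytes, io_opt = %u bytes\n",
		       chunk_size, chunk_size * stripes);	/* 65536 and 262144 */
		return 0;
	}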
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index d952b3441913..1a6cb3c7822e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -343,10 +343,10 @@ static void close_dev(struct dm_dev_internal *d, struct mapped_device *md)
 }
 
 /*
- * If possible, this checks an area of a destination device is valid.
+ * If possible, this checks an area of a destination device is invalid.
 */
-static int device_area_is_valid(struct dm_target *ti, struct dm_dev *dev,
-				sector_t start, sector_t len, void *data)
+static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
+				  sector_t start, sector_t len, void *data)
 {
 	struct queue_limits *limits = data;
 	struct block_device *bdev = dev->bdev;
@@ -357,36 +357,40 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
 	char b[BDEVNAME_SIZE];
 
 	if (!dev_size)
-		return 1;
+		return 0;
 
 	if ((start >= dev_size) || (start + len > dev_size)) {
-		DMWARN("%s: %s too small for target",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
-		return 0;
+		DMWARN("%s: %s too small for target: "
+		       "start=%llu, len=%llu, dev_size=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       (unsigned long long)start,
+		       (unsigned long long)len,
+		       (unsigned long long)dev_size);
+		return 1;
 	}
 
 	if (logical_block_size_sectors <= 1)
-		return 1;
+		return 0;
 
 	if (start & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: start=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)start,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
 	if (len & (logical_block_size_sectors - 1)) {
 		DMWARN("%s: len=%llu not aligned to h/w "
-		       "logical block size %hu of %s",
+		       "logical block size %u of %s",
 		       dm_device_name(ti->table->md),
 		       (unsigned long long)len,
 		       limits->logical_block_size, bdevname(bdev, b));
-		return 0;
+		return 1;
 	}
 
-	return 1;
+	return 0;
 }
 
 /*
@@ -496,8 +500,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 	}
 
 	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned",
-		       dm_device_name(ti->table->md), bdevname(bdev, b));
+		DMWARN("%s: target device %s is misaligned: "
+		       "physical_block_size=%u, logical_block_size=%u, "
+		       "alignment_offset=%u, start=%llu",
+		       dm_device_name(ti->table->md), bdevname(bdev, b),
+		       q->limits.physical_block_size,
+		       q->limits.logical_block_size,
+		       q->limits.alignment_offset,
+		       (unsigned long long) start << 9);
+
 
 	/*
 	 * Check if merge fn is supported.
@@ -698,7 +709,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
 
 		if (remaining) {
 			DMWARN("%s: table line %u (start sect %llu len %llu) "
-			       "not aligned to h/w logical block size %hu",
+			       "not aligned to h/w logical block size %u",
 			       dm_device_name(table->md), i,
 			       (unsigned long long) ti->begin,
 			       (unsigned long long) ti->len,
@@ -996,12 +1007,16 @@ int dm_calculate_queue_limits(struct dm_table *table,
 		ti->type->iterate_devices(ti, dm_set_device_limits,
 					  &ti_limits);
 
+		/* Set I/O hints portion of queue limits */
+		if (ti->type->io_hints)
+			ti->type->io_hints(ti, &ti_limits);
+
 		/*
 		 * Check each device area is consistent with the target's
 		 * overall queue limits.
 		 */
-		if (!ti->type->iterate_devices(ti, device_area_is_valid,
-					       &ti_limits))
+		if (ti->type->iterate_devices(ti, device_area_is_invalid,
+					      &ti_limits))
1005 | return -EINVAL; | 1020 | return -EINVAL; |
1006 | 1021 | ||
1007 | combine_limits: | 1022 | combine_limits: |
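device_area_is_valid() becomes device_area_is_invalid() with its return values flipped, the warnings gain the offending start/len/dev_size values, and the %hu format specifiers become %u for logical_block_size. The call site in dm_calculate_queue_limits() now rejects the table whenever the iteration reports a nonzero result. Below is a toy model of why an "any invalid" predicate combines more robustly than an "all valid" one, under the assumption that per-device results are effectively OR-ed (or the first nonzero result propagated), which is how the inverted check is consumed here.

#include <stdio.h>

typedef int (*dev_fn)(int dev_size);

static int is_invalid(int dev_size)
{
	return dev_size <= 0;            /* toy stand-in for the real area checks */
}

/* Toy iterator: ORs the per-device results.  With an "is_invalid"
 * predicate any bad device makes the combined result nonzero; an OR of
 * "is_valid" results would hide a bad device as long as one good device
 * is present. */
static int iterate(const int *sizes, int n, dev_fn fn)
{
	int r = 0, i;

	for (i = 0; i < n; i++)
		r |= fn(sizes[i]);
	return r;
}

int main(void)
{
	int sizes[3] = { 1024, 0, 2048 };        /* hypothetical device sizes, one bad */

	if (iterate(sizes, 3, is_invalid))
		printf("reject table: a device area is invalid\n");
	return 0;
}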
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8a311ea0d441..b4845b14740d 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -738,16 +738,22 @@ static void rq_completed(struct mapped_device *md, int run_queue) | |||
738 | dm_put(md); | 738 | dm_put(md); |
739 | } | 739 | } |
740 | 740 | ||
741 | static void free_rq_clone(struct request *clone) | ||
742 | { | ||
743 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
744 | |||
745 | blk_rq_unprep_clone(clone); | ||
746 | free_rq_tio(tio); | ||
747 | } | ||
748 | |||
741 | static void dm_unprep_request(struct request *rq) | 749 | static void dm_unprep_request(struct request *rq) |
742 | { | 750 | { |
743 | struct request *clone = rq->special; | 751 | struct request *clone = rq->special; |
744 | struct dm_rq_target_io *tio = clone->end_io_data; | ||
745 | 752 | ||
746 | rq->special = NULL; | 753 | rq->special = NULL; |
747 | rq->cmd_flags &= ~REQ_DONTPREP; | 754 | rq->cmd_flags &= ~REQ_DONTPREP; |
748 | 755 | ||
749 | blk_rq_unprep_clone(clone); | 756 | free_rq_clone(clone); |
750 | free_rq_tio(tio); | ||
751 | } | 757 | } |
752 | 758 | ||
753 | /* | 759 | /* |
@@ -825,8 +831,7 @@ static void dm_end_request(struct request *clone, int error) | |||
825 | rq->sense_len = clone->sense_len; | 831 | rq->sense_len = clone->sense_len; |
826 | } | 832 | } |
827 | 833 | ||
828 | BUG_ON(clone->bio); | 834 | free_rq_clone(clone); |
829 | free_rq_tio(tio); | ||
830 | 835 | ||
831 | blk_end_request_all(rq, error); | 836 | blk_end_request_all(rq, error); |
832 | 837 | ||
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index ae5fe91867e1..10ed195c0c1c 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c | |||
@@ -736,7 +736,7 @@ static int __devinit m25p_probe(struct spi_device *spi) | |||
736 | flash->partitioned = 1; | 736 | flash->partitioned = 1; |
737 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); | 737 | return add_mtd_partitions(&flash->mtd, parts, nr_parts); |
738 | } | 738 | } |
739 | } else if (data->nr_parts) | 739 | } else if (data && data->nr_parts) |
740 | dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", | 740 | dev_warn(&spi->dev, "ignoring %d default partitions on %s\n", |
741 | data->nr_parts, data->name); | 741 | data->nr_parts, data->name); |
742 | 742 | ||
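The one-line change guards the dereference with a NULL test on the platform data pointer; a board that registers the SPI device without flash platform data would otherwise dereference NULL in this else branch. A minimal illustration of the short-circuit guard, using a stand-in structure rather than the real driver types:

#include <stdio.h>

struct flash_pdata_model { int nr_parts; const char *name; };

int main(void)
{
	struct flash_pdata_model *data = NULL;   /* board without platform data */

	/* Short-circuit evaluation: the right-hand side is only evaluated
	 * when data is non-NULL, which is the whole point of the fix. */
	if (data && data->nr_parts)
		printf("ignoring %d default partitions on %s\n",
		       data->nr_parts, data->name);
	else
		printf("no platform partition data\n");
	return 0;
}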
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c index fb86cacd5bdb..1002e1882996 100644 --- a/drivers/mtd/nftlcore.c +++ b/drivers/mtd/nftlcore.c | |||
@@ -135,16 +135,17 @@ static void nftl_remove_dev(struct mtd_blktrans_dev *dev) | |||
135 | int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | 135 | int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, |
136 | size_t *retlen, uint8_t *buf) | 136 | size_t *retlen, uint8_t *buf) |
137 | { | 137 | { |
138 | loff_t mask = mtd->writesize - 1; | ||
138 | struct mtd_oob_ops ops; | 139 | struct mtd_oob_ops ops; |
139 | int res; | 140 | int res; |
140 | 141 | ||
141 | ops.mode = MTD_OOB_PLACE; | 142 | ops.mode = MTD_OOB_PLACE; |
142 | ops.ooboffs = offs & (mtd->writesize - 1); | 143 | ops.ooboffs = offs & mask; |
143 | ops.ooblen = len; | 144 | ops.ooblen = len; |
144 | ops.oobbuf = buf; | 145 | ops.oobbuf = buf; |
145 | ops.datbuf = NULL; | 146 | ops.datbuf = NULL; |
146 | 147 | ||
147 | res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 148 | res = mtd->read_oob(mtd, offs & ~mask, &ops); |
148 | *retlen = ops.oobretlen; | 149 | *retlen = ops.oobretlen; |
149 | return res; | 150 | return res; |
150 | } | 151 | } |
@@ -155,16 +156,17 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
155 | int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | 156 | int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, |
156 | size_t *retlen, uint8_t *buf) | 157 | size_t *retlen, uint8_t *buf) |
157 | { | 158 | { |
159 | loff_t mask = mtd->writesize - 1; | ||
158 | struct mtd_oob_ops ops; | 160 | struct mtd_oob_ops ops; |
159 | int res; | 161 | int res; |
160 | 162 | ||
161 | ops.mode = MTD_OOB_PLACE; | 163 | ops.mode = MTD_OOB_PLACE; |
162 | ops.ooboffs = offs & (mtd->writesize - 1); | 164 | ops.ooboffs = offs & mask; |
163 | ops.ooblen = len; | 165 | ops.ooblen = len; |
164 | ops.oobbuf = buf; | 166 | ops.oobbuf = buf; |
165 | ops.datbuf = NULL; | 167 | ops.datbuf = NULL; |
166 | 168 | ||
167 | res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 169 | res = mtd->write_oob(mtd, offs & ~mask, &ops); |
168 | *retlen = ops.oobretlen; | 170 | *retlen = ops.oobretlen; |
169 | return res; | 171 | return res; |
170 | } | 172 | } |
@@ -177,17 +179,18 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len, | |||
177 | static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, | 179 | static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len, |
178 | size_t *retlen, uint8_t *buf, uint8_t *oob) | 180 | size_t *retlen, uint8_t *buf, uint8_t *oob) |
179 | { | 181 | { |
182 | loff_t mask = mtd->writesize - 1; | ||
180 | struct mtd_oob_ops ops; | 183 | struct mtd_oob_ops ops; |
181 | int res; | 184 | int res; |
182 | 185 | ||
183 | ops.mode = MTD_OOB_PLACE; | 186 | ops.mode = MTD_OOB_PLACE; |
184 | ops.ooboffs = offs; | 187 | ops.ooboffs = offs & mask; |
185 | ops.ooblen = mtd->oobsize; | 188 | ops.ooblen = mtd->oobsize; |
186 | ops.oobbuf = oob; | 189 | ops.oobbuf = oob; |
187 | ops.datbuf = buf; | 190 | ops.datbuf = buf; |
188 | ops.len = len; | 191 | ops.len = len; |
189 | 192 | ||
190 | res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops); | 193 | res = mtd->write_oob(mtd, offs & ~mask, &ops); |
191 | *retlen = ops.retlen; | 194 | *retlen = ops.retlen; |
192 | return res; | 195 | return res; |
193 | } | 196 | } |
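All three helpers now build the page mask once as a loff_t, and nftl_write() additionally gains the missing "& mask" so ooboffs is the offset within the page rather than the absolute offset. One likely benefit of the wider mask type, assuming offsets can exceed 4 GiB, is that negating a 32-bit mask and AND-ing it with a 64-bit offset zero-extends the mask and silently clears the offset's upper 32 bits. A small self-contained demonstration of that C behaviour, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t writesize = 2048;             /* hypothetical NAND page size */
	int64_t offs = 0x100000800LL;          /* hypothetical offset beyond 4 GiB */

	uint32_t mask32 = writesize - 1;
	int64_t mask64 = writesize - 1;

	/* ~mask32 is a 32-bit value; zero-extension clears offs' high bits. */
	printf("offs & ~mask32 = %#llx\n", (unsigned long long)(offs & ~mask32));
	/* ~mask64 keeps the high bits set, preserving the page-aligned offset. */
	printf("offs & ~mask64 = %#llx\n", (unsigned long long)(offs & ~mask64));
	return 0;
}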
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index e212f2c5448b..a00ec639c380 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
@@ -491,6 +491,7 @@ static int gfar_remove(struct of_device *ofdev) | |||
491 | 491 | ||
492 | dev_set_drvdata(&ofdev->dev, NULL); | 492 | dev_set_drvdata(&ofdev->dev, NULL); |
493 | 493 | ||
494 | unregister_netdev(priv->ndev); | ||
494 | iounmap(priv->regs); | 495 | iounmap(priv->regs); |
495 | free_netdev(priv->ndev); | 496 | free_netdev(priv->ndev); |
496 | 497 | ||
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c index 6dcac73b4d29..f593fbbb4e52 100644 --- a/drivers/net/wireless/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/ipw2x00/ipw2200.c | |||
@@ -2874,45 +2874,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv, | |||
2874 | return 0; | 2874 | return 0; |
2875 | } | 2875 | } |
2876 | 2876 | ||
2877 | static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, | 2877 | static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address, |
2878 | u32 src_phys, u32 dest_address, u32 length) | 2878 | int nr, u32 dest_address, u32 len) |
2879 | { | 2879 | { |
2880 | u32 bytes_left = length; | 2880 | int ret, i; |
2881 | u32 src_offset = 0; | 2881 | u32 size; |
2882 | u32 dest_offset = 0; | 2882 | |
2883 | int status = 0; | ||
2884 | IPW_DEBUG_FW(">> \n"); | 2883 | IPW_DEBUG_FW(">> \n"); |
2885 | IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n", | 2884 | IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", |
2886 | src_phys, dest_address, length); | 2885 | nr, dest_address, len); |
2887 | while (bytes_left > CB_MAX_LENGTH) { | 2886 | |
2888 | status = ipw_fw_dma_add_command_block(priv, | 2887 | for (i = 0; i < nr; i++) { |
2889 | src_phys + src_offset, | 2888 | size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH); |
2890 | dest_address + | 2889 | ret = ipw_fw_dma_add_command_block(priv, src_address[i], |
2891 | dest_offset, | 2890 | dest_address + |
2892 | CB_MAX_LENGTH, 0, 0); | 2891 | i * CB_MAX_LENGTH, size, |
2893 | if (status) { | 2892 | 0, 0); |
2893 | if (ret) { | ||
2894 | IPW_DEBUG_FW_INFO(": Failed\n"); | 2894 | IPW_DEBUG_FW_INFO(": Failed\n"); |
2895 | return -1; | 2895 | return -1; |
2896 | } else | 2896 | } else |
2897 | IPW_DEBUG_FW_INFO(": Added new cb\n"); | 2897 | IPW_DEBUG_FW_INFO(": Added new cb\n"); |
2898 | |||
2899 | src_offset += CB_MAX_LENGTH; | ||
2900 | dest_offset += CB_MAX_LENGTH; | ||
2901 | bytes_left -= CB_MAX_LENGTH; | ||
2902 | } | ||
2903 | |||
2904 | /* add the buffer tail */ | ||
2905 | if (bytes_left > 0) { | ||
2906 | status = | ||
2907 | ipw_fw_dma_add_command_block(priv, src_phys + src_offset, | ||
2908 | dest_address + dest_offset, | ||
2909 | bytes_left, 0, 0); | ||
2910 | if (status) { | ||
2911 | IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n"); | ||
2912 | return -1; | ||
2913 | } else | ||
2914 | IPW_DEBUG_FW_INFO | ||
2915 | (": Adding new cb - the buffer tail\n"); | ||
2916 | } | 2898 | } |
2917 | 2899 | ||
2918 | IPW_DEBUG_FW("<< \n"); | 2900 | IPW_DEBUG_FW("<< \n"); |
@@ -3160,59 +3142,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len) | |||
3160 | 3142 | ||
3161 | static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) | 3143 | static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len) |
3162 | { | 3144 | { |
3163 | int rc = -1; | 3145 | int ret = -1; |
3164 | int offset = 0; | 3146 | int offset = 0; |
3165 | struct fw_chunk *chunk; | 3147 | struct fw_chunk *chunk; |
3166 | dma_addr_t shared_phys; | 3148 | int total_nr = 0; |
3167 | u8 *shared_virt; | 3149 | int i; |
3150 | struct pci_pool *pool; | ||
3151 | u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL]; | ||
3152 | dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL]; | ||
3168 | 3153 | ||
3169 | IPW_DEBUG_TRACE("<< : \n"); | 3154 | IPW_DEBUG_TRACE("<< : \n"); |
3170 | shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys); | ||
3171 | 3155 | ||
3172 | if (!shared_virt) | 3156 | pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0); |
3157 | if (!pool) { | ||
3158 | IPW_ERROR("pci_pool_create failed\n"); | ||
3173 | return -ENOMEM; | 3159 | return -ENOMEM; |
3174 | 3160 | } | |
3175 | memmove(shared_virt, data, len); | ||
3176 | 3161 | ||
3177 | /* Start the Dma */ | 3162 | /* Start the Dma */ |
3178 | rc = ipw_fw_dma_enable(priv); | 3163 | ret = ipw_fw_dma_enable(priv); |
3179 | 3164 | ||
3180 | /* the DMA is already ready this would be a bug. */ | 3165 | /* the DMA is already ready this would be a bug. */ |
3181 | BUG_ON(priv->sram_desc.last_cb_index > 0); | 3166 | BUG_ON(priv->sram_desc.last_cb_index > 0); |
3182 | 3167 | ||
3183 | do { | 3168 | do { |
3169 | u32 chunk_len; | ||
3170 | u8 *start; | ||
3171 | int size; | ||
3172 | int nr = 0; | ||
3173 | |||
3184 | chunk = (struct fw_chunk *)(data + offset); | 3174 | chunk = (struct fw_chunk *)(data + offset); |
3185 | offset += sizeof(struct fw_chunk); | 3175 | offset += sizeof(struct fw_chunk); |
3176 | chunk_len = le32_to_cpu(chunk->length); | ||
3177 | start = data + offset; | ||
3178 | |||
3179 | nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH; | ||
3180 | for (i = 0; i < nr; i++) { | ||
3181 | virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL, | ||
3182 | &phys[total_nr]); | ||
3183 | if (!virts[total_nr]) { | ||
3184 | ret = -ENOMEM; | ||
3185 | goto out; | ||
3186 | } | ||
3187 | size = min_t(u32, chunk_len - i * CB_MAX_LENGTH, | ||
3188 | CB_MAX_LENGTH); | ||
3189 | memcpy(virts[total_nr], start, size); | ||
3190 | start += size; | ||
3191 | total_nr++; | ||
3192 | /* We don't support fw chunk larger than 64*8K */ | ||
3193 | BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL); | ||
3194 | } | ||
3195 | |||
3186 | /* build DMA packet and queue up for sending */ | 3196 | /* build DMA packet and queue up for sending */ |
3187 | /* dma to chunk->address, the chunk->length bytes from data + | 3197 | /* dma to chunk->address, the chunk->length bytes from data + |
3188 | * offeset*/ | 3198 | * offeset*/ |
3189 | /* Dma loading */ | 3199 | /* Dma loading */ |
3190 | rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset, | 3200 | ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr], |
3191 | le32_to_cpu(chunk->address), | 3201 | nr, le32_to_cpu(chunk->address), |
3192 | le32_to_cpu(chunk->length)); | 3202 | chunk_len); |
3193 | if (rc) { | 3203 | if (ret) { |
3194 | IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); | 3204 | IPW_DEBUG_INFO("dmaAddBuffer Failed\n"); |
3195 | goto out; | 3205 | goto out; |
3196 | } | 3206 | } |
3197 | 3207 | ||
3198 | offset += le32_to_cpu(chunk->length); | 3208 | offset += chunk_len; |
3199 | } while (offset < len); | 3209 | } while (offset < len); |
3200 | 3210 | ||
3201 | /* Run the DMA and wait for the answer */ | 3211 | /* Run the DMA and wait for the answer */ |
3202 | rc = ipw_fw_dma_kick(priv); | 3212 | ret = ipw_fw_dma_kick(priv); |
3203 | if (rc) { | 3213 | if (ret) { |
3204 | IPW_ERROR("dmaKick Failed\n"); | 3214 | IPW_ERROR("dmaKick Failed\n"); |
3205 | goto out; | 3215 | goto out; |
3206 | } | 3216 | } |
3207 | 3217 | ||
3208 | rc = ipw_fw_dma_wait(priv); | 3218 | ret = ipw_fw_dma_wait(priv); |
3209 | if (rc) { | 3219 | if (ret) { |
3210 | IPW_ERROR("dmaWaitSync Failed\n"); | 3220 | IPW_ERROR("dmaWaitSync Failed\n"); |
3211 | goto out; | 3221 | goto out; |
3212 | } | 3222 | } |
3213 | out: | 3223 | out: |
3214 | pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys); | 3224 | for (i = 0; i < total_nr; i++) |
3215 | return rc; | 3225 | pci_pool_free(pool, virts[i], phys[i]); |
3226 | |||
3227 | pci_pool_destroy(pool); | ||
3228 | |||
3229 | return ret; | ||
3216 | } | 3230 | } |
3217 | 3231 | ||
3218 | /* stop nic */ | 3232 | /* stop nic */ |
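Instead of one large coherent allocation holding the whole firmware image, the loader now carves each fw_chunk into CB_MAX_LENGTH-sized pieces from a pci_pool and passes the per-piece DMA addresses to ipw_fw_dma_add_buffer(). The per-piece bookkeeping is the usual round-up/clamp pattern; a userspace sketch of just that arithmetic is below (the CB_MAX_LENGTH value and chunk length are made up for illustration).

#include <stdio.h>

#define CB_MAX_LENGTH 0x800              /* hypothetical per-descriptor limit */

static unsigned min_u32(unsigned a, unsigned b) { return a < b ? a : b; }

int main(void)
{
	unsigned chunk_len = 0x1234;     /* hypothetical firmware chunk length */
	unsigned nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;  /* round up */
	unsigned i;

	for (i = 0; i < nr; i++) {
		/* last piece is clamped to the remaining bytes */
		unsigned size = min_u32(chunk_len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);

		printf("descriptor %u: %#x bytes at chunk offset %#x\n",
		       i, size, i * CB_MAX_LENGTH);
	}
	return 0;
}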
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index e3a87210e947..e03fe98f0619 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c | |||
@@ -598,6 +598,29 @@ int pci_iov_resource_bar(struct pci_dev *dev, int resno, | |||
598 | } | 598 | } |
599 | 599 | ||
600 | /** | 600 | /** |
601 | * pci_sriov_resource_alignment - get resource alignment for VF BAR | ||
602 | * @dev: the PCI device | ||
603 | * @resno: the resource number | ||
604 | * | ||
605 | * Returns the alignment of the VF BAR found in the SR-IOV capability. | ||
606 | * This is not the same as the resource size which is defined as | ||
607 | * the VF BAR size multiplied by the number of VFs. The alignment | ||
608 | * is just the VF BAR size. | ||
609 | */ | ||
610 | int pci_sriov_resource_alignment(struct pci_dev *dev, int resno) | ||
611 | { | ||
612 | struct resource tmp; | ||
613 | enum pci_bar_type type; | ||
614 | int reg = pci_iov_resource_bar(dev, resno, &type); | ||
615 | |||
616 | if (!reg) | ||
617 | return 0; | ||
618 | |||
619 | __pci_read_base(dev, type, &tmp, reg); | ||
620 | return resource_alignment(&tmp); | ||
621 | } | ||
622 | |||
623 | /** | ||
601 | * pci_restore_iov_state - restore the state of the IOV capability | 624 | * pci_restore_iov_state - restore the state of the IOV capability |
602 | * @dev: the PCI device | 625 | * @dev: the PCI device |
603 | */ | 626 | */ |
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index f73bcbedf37c..5ff4d25bf0e9 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h | |||
@@ -243,6 +243,7 @@ extern int pci_iov_init(struct pci_dev *dev); | |||
243 | extern void pci_iov_release(struct pci_dev *dev); | 243 | extern void pci_iov_release(struct pci_dev *dev); |
244 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, | 244 | extern int pci_iov_resource_bar(struct pci_dev *dev, int resno, |
245 | enum pci_bar_type *type); | 245 | enum pci_bar_type *type); |
246 | extern int pci_sriov_resource_alignment(struct pci_dev *dev, int resno); | ||
246 | extern void pci_restore_iov_state(struct pci_dev *dev); | 247 | extern void pci_restore_iov_state(struct pci_dev *dev); |
247 | extern int pci_iov_bus_range(struct pci_bus *bus); | 248 | extern int pci_iov_bus_range(struct pci_bus *bus); |
248 | 249 | ||
@@ -298,4 +299,16 @@ static inline int pci_ats_enabled(struct pci_dev *dev) | |||
298 | } | 299 | } |
299 | #endif /* CONFIG_PCI_IOV */ | 300 | #endif /* CONFIG_PCI_IOV */ |
300 | 301 | ||
302 | static inline int pci_resource_alignment(struct pci_dev *dev, | ||
303 | struct resource *res) | ||
304 | { | ||
305 | #ifdef CONFIG_PCI_IOV | ||
306 | int resno = res - dev->resource; | ||
307 | |||
308 | if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END) | ||
309 | return pci_sriov_resource_alignment(dev, resno); | ||
310 | #endif | ||
311 | return resource_alignment(res); | ||
312 | } | ||
313 | |||
301 | #endif /* DRIVERS_PCI_H */ | 314 | #endif /* DRIVERS_PCI_H */ |
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index b636e245445d..7c443b4583ab 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/ioport.h> | 25 | #include <linux/ioport.h> |
26 | #include <linux/cache.h> | 26 | #include <linux/cache.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | 28 | #include "pci.h" | |
29 | 29 | ||
30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) | 30 | static void pbus_assign_resources_sorted(const struct pci_bus *bus) |
31 | { | 31 | { |
@@ -384,7 +384,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long | |||
384 | continue; | 384 | continue; |
385 | r_size = resource_size(r); | 385 | r_size = resource_size(r); |
386 | /* For bridges size != alignment */ | 386 | /* For bridges size != alignment */ |
387 | align = resource_alignment(r); | 387 | align = pci_resource_alignment(dev, r); |
388 | order = __ffs(align) - 20; | 388 | order = __ffs(align) - 20; |
389 | if (order > 11) { | 389 | if (order > 11) { |
390 | dev_warn(&dev->dev, "BAR %d bad alignment %llx: " | 390 | dev_warn(&dev->dev, "BAR %d bad alignment %llx: " |
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 1898c7b47907..88cdd1a937d6 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c | |||
@@ -144,7 +144,7 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev, | |||
144 | 144 | ||
145 | size = resource_size(res); | 145 | size = resource_size(res); |
146 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; | 146 | min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; |
147 | align = resource_alignment(res); | 147 | align = pci_resource_alignment(dev, res); |
148 | 148 | ||
149 | /* First, try exact prefetching match.. */ | 149 | /* First, try exact prefetching match.. */ |
150 | ret = pci_bus_alloc_resource(bus, res, size, align, min, | 150 | ret = pci_bus_alloc_resource(bus, res, size, align, min, |
@@ -178,7 +178,7 @@ int pci_assign_resource(struct pci_dev *dev, int resno) | |||
178 | struct pci_bus *bus; | 178 | struct pci_bus *bus; |
179 | int ret; | 179 | int ret; |
180 | 180 | ||
181 | align = resource_alignment(res); | 181 | align = pci_resource_alignment(dev, res); |
182 | if (!align) { | 182 | if (!align) { |
183 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " | 183 | dev_info(&dev->dev, "BAR %d: can't allocate resource (bogus " |
184 | "alignment) %pR flags %#lx\n", | 184 | "alignment) %pR flags %#lx\n", |
@@ -259,7 +259,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | |||
259 | if (!(r->flags) || r->parent) | 259 | if (!(r->flags) || r->parent) |
260 | continue; | 260 | continue; |
261 | 261 | ||
262 | r_align = resource_alignment(r); | 262 | r_align = pci_resource_alignment(dev, r); |
263 | if (!r_align) { | 263 | if (!r_align) { |
264 | dev_warn(&dev->dev, "BAR %d: bogus alignment " | 264 | dev_warn(&dev->dev, "BAR %d: bogus alignment " |
265 | "%pR flags %#lx\n", | 265 | "%pR flags %#lx\n", |
@@ -271,7 +271,7 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) | |||
271 | struct resource_list *ln = list->next; | 271 | struct resource_list *ln = list->next; |
272 | 272 | ||
273 | if (ln) | 273 | if (ln) |
274 | align = resource_alignment(ln->res); | 274 | align = pci_resource_alignment(ln->dev, ln->res); |
275 | 275 | ||
276 | if (r_align > align) { | 276 | if (r_align > align) { |
277 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); | 277 | tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); |
diff --git a/fs/compat.c b/fs/compat.c index 94502dab972a..6d6f98fe64a0 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -1485,20 +1485,15 @@ int compat_do_execve(char * filename, | |||
1485 | if (!bprm) | 1485 | if (!bprm) |
1486 | goto out_files; | 1486 | goto out_files; |
1487 | 1487 | ||
1488 | retval = -ERESTARTNOINTR; | 1488 | retval = prepare_bprm_creds(bprm); |
1489 | if (mutex_lock_interruptible(¤t->cred_guard_mutex)) | 1489 | if (retval) |
1490 | goto out_free; | 1490 | goto out_free; |
1491 | current->in_execve = 1; | ||
1492 | |||
1493 | retval = -ENOMEM; | ||
1494 | bprm->cred = prepare_exec_creds(); | ||
1495 | if (!bprm->cred) | ||
1496 | goto out_unlock; | ||
1497 | 1491 | ||
1498 | retval = check_unsafe_exec(bprm); | 1492 | retval = check_unsafe_exec(bprm); |
1499 | if (retval < 0) | 1493 | if (retval < 0) |
1500 | goto out_unlock; | 1494 | goto out_free; |
1501 | clear_in_exec = retval; | 1495 | clear_in_exec = retval; |
1496 | current->in_execve = 1; | ||
1502 | 1497 | ||
1503 | file = open_exec(filename); | 1498 | file = open_exec(filename); |
1504 | retval = PTR_ERR(file); | 1499 | retval = PTR_ERR(file); |
@@ -1547,7 +1542,6 @@ int compat_do_execve(char * filename, | |||
1547 | /* execve succeeded */ | 1542 | /* execve succeeded */ |
1548 | current->fs->in_exec = 0; | 1543 | current->fs->in_exec = 0; |
1549 | current->in_execve = 0; | 1544 | current->in_execve = 0; |
1550 | mutex_unlock(¤t->cred_guard_mutex); | ||
1551 | acct_update_integrals(current); | 1545 | acct_update_integrals(current); |
1552 | free_bprm(bprm); | 1546 | free_bprm(bprm); |
1553 | if (displaced) | 1547 | if (displaced) |
@@ -1567,10 +1561,7 @@ out_file: | |||
1567 | out_unmark: | 1561 | out_unmark: |
1568 | if (clear_in_exec) | 1562 | if (clear_in_exec) |
1569 | current->fs->in_exec = 0; | 1563 | current->fs->in_exec = 0; |
1570 | |||
1571 | out_unlock: | ||
1572 | current->in_execve = 0; | 1564 | current->in_execve = 0; |
1573 | mutex_unlock(¤t->cred_guard_mutex); | ||
1574 | 1565 | ||
1575 | out_free: | 1566 | out_free: |
1576 | free_bprm(bprm); | 1567 | free_bprm(bprm); |
diff --git a/fs/exec.c b/fs/exec.c | |||
@@ -1016,6 +1016,35 @@ out: | |||
1016 | EXPORT_SYMBOL(flush_old_exec); | 1016 | EXPORT_SYMBOL(flush_old_exec); |
1017 | 1017 | ||
1018 | /* | 1018 | /* |
1019 | * Prepare credentials and lock ->cred_guard_mutex. | ||
1020 | * install_exec_creds() commits the new creds and drops the lock. | ||
1021 | * Or, if exec fails before, free_bprm() should release ->cred and | ||
1022 | * unlock. | ||
1023 | */ | ||
1024 | int prepare_bprm_creds(struct linux_binprm *bprm) | ||
1025 | { | ||
1026 | if (mutex_lock_interruptible(¤t->cred_guard_mutex)) | ||
1027 | return -ERESTARTNOINTR; | ||
1028 | |||
1029 | bprm->cred = prepare_exec_creds(); | ||
1030 | if (likely(bprm->cred)) | ||
1031 | return 0; | ||
1032 | |||
1033 | mutex_unlock(¤t->cred_guard_mutex); | ||
1034 | return -ENOMEM; | ||
1035 | } | ||
1036 | |||
1037 | void free_bprm(struct linux_binprm *bprm) | ||
1038 | { | ||
1039 | free_arg_pages(bprm); | ||
1040 | if (bprm->cred) { | ||
1041 | mutex_unlock(¤t->cred_guard_mutex); | ||
1042 | abort_creds(bprm->cred); | ||
1043 | } | ||
1044 | kfree(bprm); | ||
1045 | } | ||
1046 | |||
1047 | /* | ||
1019 | * install the new credentials for this executable | 1048 | * install the new credentials for this executable |
1020 | */ | 1049 | */ |
1021 | void install_exec_creds(struct linux_binprm *bprm) | 1050 | void install_exec_creds(struct linux_binprm *bprm) |
@@ -1024,12 +1053,13 @@ void install_exec_creds(struct linux_binprm *bprm) | |||
1024 | 1053 | ||
1025 | commit_creds(bprm->cred); | 1054 | commit_creds(bprm->cred); |
1026 | bprm->cred = NULL; | 1055 | bprm->cred = NULL; |
1027 | 1056 | /* | |
1028 | /* cred_guard_mutex must be held at least to this point to prevent | 1057 | * cred_guard_mutex must be held at least to this point to prevent |
1029 | * ptrace_attach() from altering our determination of the task's | 1058 | * ptrace_attach() from altering our determination of the task's |
1030 | * credentials; any time after this it may be unlocked */ | 1059 | * credentials; any time after this it may be unlocked. |
1031 | 1060 | */ | |
1032 | security_bprm_committed_creds(bprm); | 1061 | security_bprm_committed_creds(bprm); |
1062 | mutex_unlock(¤t->cred_guard_mutex); | ||
1033 | } | 1063 | } |
1034 | EXPORT_SYMBOL(install_exec_creds); | 1064 | EXPORT_SYMBOL(install_exec_creds); |
1035 | 1065 | ||
@@ -1246,14 +1276,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs) | |||
1246 | 1276 | ||
1247 | EXPORT_SYMBOL(search_binary_handler); | 1277 | EXPORT_SYMBOL(search_binary_handler); |
1248 | 1278 | ||
1249 | void free_bprm(struct linux_binprm *bprm) | ||
1250 | { | ||
1251 | free_arg_pages(bprm); | ||
1252 | if (bprm->cred) | ||
1253 | abort_creds(bprm->cred); | ||
1254 | kfree(bprm); | ||
1255 | } | ||
1256 | |||
1257 | /* | 1279 | /* |
1258 | * sys_execve() executes a new program. | 1280 | * sys_execve() executes a new program. |
1259 | */ | 1281 | */ |
@@ -1277,20 +1299,15 @@ int do_execve(char * filename, | |||
1277 | if (!bprm) | 1299 | if (!bprm) |
1278 | goto out_files; | 1300 | goto out_files; |
1279 | 1301 | ||
1280 | retval = -ERESTARTNOINTR; | 1302 | retval = prepare_bprm_creds(bprm); |
1281 | if (mutex_lock_interruptible(¤t->cred_guard_mutex)) | 1303 | if (retval) |
1282 | goto out_free; | 1304 | goto out_free; |
1283 | current->in_execve = 1; | ||
1284 | |||
1285 | retval = -ENOMEM; | ||
1286 | bprm->cred = prepare_exec_creds(); | ||
1287 | if (!bprm->cred) | ||
1288 | goto out_unlock; | ||
1289 | 1305 | ||
1290 | retval = check_unsafe_exec(bprm); | 1306 | retval = check_unsafe_exec(bprm); |
1291 | if (retval < 0) | 1307 | if (retval < 0) |
1292 | goto out_unlock; | 1308 | goto out_free; |
1293 | clear_in_exec = retval; | 1309 | clear_in_exec = retval; |
1310 | current->in_execve = 1; | ||
1294 | 1311 | ||
1295 | file = open_exec(filename); | 1312 | file = open_exec(filename); |
1296 | retval = PTR_ERR(file); | 1313 | retval = PTR_ERR(file); |
@@ -1340,7 +1357,6 @@ int do_execve(char * filename, | |||
1340 | /* execve succeeded */ | 1357 | /* execve succeeded */ |
1341 | current->fs->in_exec = 0; | 1358 | current->fs->in_exec = 0; |
1342 | current->in_execve = 0; | 1359 | current->in_execve = 0; |
1343 | mutex_unlock(¤t->cred_guard_mutex); | ||
1344 | acct_update_integrals(current); | 1360 | acct_update_integrals(current); |
1345 | free_bprm(bprm); | 1361 | free_bprm(bprm); |
1346 | if (displaced) | 1362 | if (displaced) |
@@ -1360,10 +1376,7 @@ out_file: | |||
1360 | out_unmark: | 1376 | out_unmark: |
1361 | if (clear_in_exec) | 1377 | if (clear_in_exec) |
1362 | current->fs->in_exec = 0; | 1378 | current->fs->in_exec = 0; |
1363 | |||
1364 | out_unlock: | ||
1365 | current->in_execve = 0; | 1379 | current->in_execve = 0; |
1366 | mutex_unlock(¤t->cred_guard_mutex); | ||
1367 | 1380 | ||
1368 | out_free: | 1381 | out_free: |
1369 | free_bprm(bprm); | 1382 | free_bprm(bprm); |
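prepare_bprm_creds() now takes cred_guard_mutex and allocates the new credentials in one step, and the mutex is released on exactly one of two paths: install_exec_creds() on success or free_bprm() on failure, so do_execve() and compat_do_execve() drop their out_unlock labels and never unlock it directly. A toy userspace model of that ownership hand-off follows; the pthread mutex and the structure are stand-ins, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy model: prepare() takes the guard lock and allocates, commit()
 * (success) or destroy() (failure) is the single place that unlocks. */
static pthread_mutex_t cred_guard = PTHREAD_MUTEX_INITIALIZER;

struct bprm_model {
	int *cred;                       /* non-NULL while the lock is held */
};

static int prepare(struct bprm_model *b)
{
	if (pthread_mutex_lock(&cred_guard))
		return -1;
	b->cred = malloc(sizeof(*b->cred));
	if (b->cred)
		return 0;                /* success: lock stays held */
	pthread_mutex_unlock(&cred_guard);
	return -1;
}

static void commit(struct bprm_model *b)
{
	free(b->cred);                   /* models commit_creds() consuming them */
	b->cred = NULL;
	pthread_mutex_unlock(&cred_guard);
}

static void destroy(struct bprm_model *b)
{
	if (b->cred) {                   /* error path: still holding the lock */
		pthread_mutex_unlock(&cred_guard);
		free(b->cred);
	}
}

int main(void)
{
	struct bprm_model b = { NULL };

	if (prepare(&b) == 0)
		commit(&b);              /* an error path would call destroy(&b) */
	(void)destroy;
	return 0;
}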
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index e1dedb0f7873..78d9b925fc94 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -362,6 +362,10 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
362 | if (dir_de) { | 362 | if (dir_de) { |
363 | if (old_dir != new_dir) | 363 | if (old_dir != new_dir) |
364 | ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); | 364 | ext2_set_link(old_inode, dir_de, dir_page, new_dir, 0); |
365 | else { | ||
366 | kunmap(dir_page); | ||
367 | page_cache_release(dir_page); | ||
368 | } | ||
365 | inode_dec_link_count(old_dir); | 369 | inode_dec_link_count(old_dir); |
366 | } | 370 | } |
367 | return 0; | 371 | return 0; |
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c index d9a721e6db70..5ef7bac265e5 100644 --- a/fs/jffs2/wbuf.c +++ b/fs/jffs2/wbuf.c | |||
@@ -1268,10 +1268,20 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { | |||
1268 | if (!c->wbuf) | 1268 | if (!c->wbuf) |
1269 | return -ENOMEM; | 1269 | return -ENOMEM; |
1270 | 1270 | ||
1271 | #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY | ||
1272 | c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); | ||
1273 | if (!c->wbuf_verify) { | ||
1274 | kfree(c->wbuf); | ||
1275 | return -ENOMEM; | ||
1276 | } | ||
1277 | #endif | ||
1271 | return 0; | 1278 | return 0; |
1272 | } | 1279 | } |
1273 | 1280 | ||
1274 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { | 1281 | void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { |
1282 | #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY | ||
1283 | kfree(c->wbuf_verify); | ||
1284 | #endif | ||
1275 | kfree(c->wbuf); | 1285 | kfree(c->wbuf); |
1276 | } | 1286 | } |
1277 | 1287 | ||
diff --git a/fs/namei.c b/fs/namei.c index f3c5b278895a..1f13751693a5 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1542,28 +1542,31 @@ int may_open(struct path *path, int acc_mode, int flag) | |||
1542 | * An append-only file must be opened in append mode for writing. | 1542 | * An append-only file must be opened in append mode for writing. |
1543 | */ | 1543 | */ |
1544 | if (IS_APPEND(inode)) { | 1544 | if (IS_APPEND(inode)) { |
1545 | error = -EPERM; | ||
1545 | if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) | 1546 | if ((flag & FMODE_WRITE) && !(flag & O_APPEND)) |
1546 | return -EPERM; | 1547 | goto err_out; |
1547 | if (flag & O_TRUNC) | 1548 | if (flag & O_TRUNC) |
1548 | return -EPERM; | 1549 | goto err_out; |
1549 | } | 1550 | } |
1550 | 1551 | ||
1551 | /* O_NOATIME can only be set by the owner or superuser */ | 1552 | /* O_NOATIME can only be set by the owner or superuser */ |
1552 | if (flag & O_NOATIME) | 1553 | if (flag & O_NOATIME) |
1553 | if (!is_owner_or_cap(inode)) | 1554 | if (!is_owner_or_cap(inode)) { |
1554 | return -EPERM; | 1555 | error = -EPERM; |
1556 | goto err_out; | ||
1557 | } | ||
1555 | 1558 | ||
1556 | /* | 1559 | /* |
1557 | * Ensure there are no outstanding leases on the file. | 1560 | * Ensure there are no outstanding leases on the file. |
1558 | */ | 1561 | */ |
1559 | error = break_lease(inode, flag); | 1562 | error = break_lease(inode, flag); |
1560 | if (error) | 1563 | if (error) |
1561 | return error; | 1564 | goto err_out; |
1562 | 1565 | ||
1563 | if (flag & O_TRUNC) { | 1566 | if (flag & O_TRUNC) { |
1564 | error = get_write_access(inode); | 1567 | error = get_write_access(inode); |
1565 | if (error) | 1568 | if (error) |
1566 | return error; | 1569 | goto err_out; |
1567 | 1570 | ||
1568 | /* | 1571 | /* |
1569 | * Refuse to truncate files with mandatory locks held on them. | 1572 | * Refuse to truncate files with mandatory locks held on them. |
@@ -1581,12 +1584,17 @@ int may_open(struct path *path, int acc_mode, int flag) | |||
1581 | } | 1584 | } |
1582 | put_write_access(inode); | 1585 | put_write_access(inode); |
1583 | if (error) | 1586 | if (error) |
1584 | return error; | 1587 | goto err_out; |
1585 | } else | 1588 | } else |
1586 | if (flag & FMODE_WRITE) | 1589 | if (flag & FMODE_WRITE) |
1587 | vfs_dq_init(inode); | 1590 | vfs_dq_init(inode); |
1588 | 1591 | ||
1589 | return 0; | 1592 | return 0; |
1593 | err_out: | ||
1594 | ima_counts_put(path, acc_mode ? | ||
1595 | acc_mode & (MAY_READ | MAY_WRITE | MAY_EXEC) : | ||
1596 | ACC_MODE(flag) & (MAY_READ | MAY_WRITE)); | ||
1597 | return error; | ||
1590 | } | 1598 | } |
1591 | 1599 | ||
1592 | /* | 1600 | /* |
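Every early return after the IS_APPEND check is converted into a jump to the new err_out label, which calls ima_counts_put() so that counter state taken earlier in the open path is given back on each failure exit instead of only being kept on success. A toy model of that single-exit rebalancing pattern; the counter here is a stand-in, not the IMA API.

#include <stdio.h>

static int counter;                      /* stand-in for the counters */

static void counts_get(void) { counter++; }
static void counts_put(void) { counter--; }

/* Model of the reworked may_open(): the count was taken before this
 * function ran, so every failure exit must give it back via one label. */
static int may_open_model(int fail)
{
	int error = 0;

	if (fail) {
		error = -1;              /* stand-in for -EPERM etc. */
		goto err_out;
	}
	return 0;                        /* success: caller keeps the count */

err_out:
	counts_put();
	return error;
}

int main(void)
{
	counts_get();                    /* taken earlier in the open path */
	if (may_open_model(1) < 0)
		printf("failure path: counter=%d (rebalanced)\n", counter);
	return 0;
}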
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 7e0b61be212e..c668bca579c1 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -209,6 +209,7 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, | |||
209 | * We cannot call radix_tree_preload for the kernels older | 209 | * We cannot call radix_tree_preload for the kernels older |
210 | * than 2.6.23, because it is not exported for modules. | 210 | * than 2.6.23, because it is not exported for modules. |
211 | */ | 211 | */ |
212 | retry: | ||
212 | err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); | 213 | err = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); |
213 | if (err) | 214 | if (err) |
214 | goto failed_unlock; | 215 | goto failed_unlock; |
@@ -219,7 +220,6 @@ int nilfs_btnode_prepare_change_key(struct address_space *btnc, | |||
219 | (unsigned long long)oldkey, | 220 | (unsigned long long)oldkey, |
220 | (unsigned long long)newkey); | 221 | (unsigned long long)newkey); |
221 | 222 | ||
222 | retry: | ||
223 | spin_lock_irq(&btnc->tree_lock); | 223 | spin_lock_irq(&btnc->tree_lock); |
224 | err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page); | 224 | err = radix_tree_insert(&btnc->page_tree, newkey, obh->b_page); |
225 | spin_unlock_irq(&btnc->tree_lock); | 225 | spin_unlock_irq(&btnc->tree_lock); |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b401654011a2..8a1e61545f41 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -1747,8 +1747,8 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, | |||
1747 | * we know zeros will only be needed in the first and/or last cluster. | 1747 | * we know zeros will only be needed in the first and/or last cluster. |
1748 | */ | 1748 | */ |
1749 | if (clusters_to_alloc || extents_to_split || | 1749 | if (clusters_to_alloc || extents_to_split || |
1750 | wc->w_desc[0].c_needs_zero || | 1750 | (wc->w_clen && (wc->w_desc[0].c_needs_zero || |
1751 | wc->w_desc[wc->w_clen - 1].c_needs_zero) | 1751 | wc->w_desc[wc->w_clen - 1].c_needs_zero))) |
1752 | cluster_of_pages = 1; | 1752 | cluster_of_pages = 1; |
1753 | else | 1753 | else |
1754 | cluster_of_pages = 0; | 1754 | cluster_of_pages = 0; |
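The added w_clen test gates the descriptor lookups: if the write covers zero clusters, wc->w_desc[wc->w_clen - 1] would index with a wrapped-around value (assuming the count is unsigned) or otherwise read outside the array, so both accesses must be conditional on the count. A minimal model of the guard:

#include <stdio.h>

struct desc_model { int needs_zero; };

/* The count gates the first/last element accesses, exactly as the
 * added parentheses do above. */
static int needs_zeroing(const struct desc_model *d, unsigned clen)
{
	return clen && (d[0].needs_zero || d[clen - 1].needs_zero);
}

int main(void)
{
	struct desc_model d[2] = { { 0 }, { 1 } };

	printf("clen=2 -> %d\n", needs_zeroing(d, 2));  /* last element needs zeroing */
	printf("clen=0 -> %d\n", needs_zeroing(d, 0));  /* guarded: no out-of-bounds read */
	return 0;
}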
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 2f28b7de2c8d..b4957c7d9fe2 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
@@ -85,6 +85,17 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
85 | goto bail; | 85 | goto bail; |
86 | } | 86 | } |
87 | 87 | ||
88 | /* | ||
89 | * If the last lookup failed to create dentry lock, let us | ||
90 | * redo it. | ||
91 | */ | ||
92 | if (!dentry->d_fsdata) { | ||
93 | mlog(0, "Inode %llu doesn't have dentry lock, " | ||
94 | "returning false\n", | ||
95 | (unsigned long long)OCFS2_I(inode)->ip_blkno); | ||
96 | goto bail; | ||
97 | } | ||
98 | |||
88 | ret = 1; | 99 | ret = 1; |
89 | 100 | ||
90 | bail: | 101 | bail: |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c index 0882d166239a..eafcc7c18706 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl32.c +++ b/fs/xfs/linux-2.6/xfs_ioctl32.c | |||
@@ -619,7 +619,7 @@ xfs_file_compat_ioctl( | |||
619 | case XFS_IOC_GETVERSION_32: | 619 | case XFS_IOC_GETVERSION_32: |
620 | cmd = _NATIVE_IOC(cmd, long); | 620 | cmd = _NATIVE_IOC(cmd, long); |
621 | return xfs_file_ioctl(filp, cmd, p); | 621 | return xfs_file_ioctl(filp, cmd, p); |
622 | case XFS_IOC_SWAPEXT: { | 622 | case XFS_IOC_SWAPEXT_32: { |
623 | struct xfs_swapext sxp; | 623 | struct xfs_swapext sxp; |
624 | struct compat_xfs_swapext __user *sxu = arg; | 624 | struct compat_xfs_swapext __user *sxu = arg; |
625 | 625 | ||
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 010545436efa..5a2bd1cc9656 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h | |||
@@ -137,6 +137,7 @@ struct crypto_instance *crypto_alloc_instance(const char *name, | |||
137 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); | 137 | void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen); |
138 | int crypto_enqueue_request(struct crypto_queue *queue, | 138 | int crypto_enqueue_request(struct crypto_queue *queue, |
139 | struct crypto_async_request *request); | 139 | struct crypto_async_request *request); |
140 | void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset); | ||
140 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); | 141 | struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue); |
141 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); | 142 | int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm); |
142 | 143 | ||
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 2ba42cd7d6aa..3a748a6bf772 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h | |||
@@ -79,8 +79,8 @@ static inline int skcipher_enqueue_givcrypt( | |||
79 | static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( | 79 | static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt( |
80 | struct crypto_queue *queue) | 80 | struct crypto_queue *queue) |
81 | { | 81 | { |
82 | return container_of(ablkcipher_dequeue_request(queue), | 82 | return __crypto_dequeue_request( |
83 | struct skcipher_givcrypt_request, creq); | 83 | queue, offsetof(struct skcipher_givcrypt_request, creq.base)); |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline void *skcipher_givcrypt_reqctx( | 86 | static inline void *skcipher_givcrypt_reqctx( |
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index 61ee18c1bdb4..2046b5b8af48 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -117,6 +117,7 @@ extern int setup_arg_pages(struct linux_binprm * bprm, | |||
117 | int executable_stack); | 117 | int executable_stack); |
118 | extern int bprm_mm_init(struct linux_binprm *bprm); | 118 | extern int bprm_mm_init(struct linux_binprm *bprm); |
119 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); | 119 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); |
120 | extern int prepare_bprm_creds(struct linux_binprm *bprm); | ||
120 | extern void install_exec_creds(struct linux_binprm *bprm); | 121 | extern void install_exec_creds(struct linux_binprm *bprm); |
121 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); | 122 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); |
122 | extern int set_binfmt(struct linux_binfmt *new); | 123 | extern int set_binfmt(struct linux_binfmt *new); |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 655e7721580a..df7607e6dce8 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -91,6 +91,9 @@ typedef int (*dm_iterate_devices_fn) (struct dm_target *ti, | |||
91 | iterate_devices_callout_fn fn, | 91 | iterate_devices_callout_fn fn, |
92 | void *data); | 92 | void *data); |
93 | 93 | ||
94 | typedef void (*dm_io_hints_fn) (struct dm_target *ti, | ||
95 | struct queue_limits *limits); | ||
96 | |||
94 | /* | 97 | /* |
95 | * Returns: | 98 | * Returns: |
96 | * 0: The target can handle the next I/O immediately. | 99 | * 0: The target can handle the next I/O immediately. |
@@ -151,6 +154,7 @@ struct target_type { | |||
151 | dm_merge_fn merge; | 154 | dm_merge_fn merge; |
152 | dm_busy_fn busy; | 155 | dm_busy_fn busy; |
153 | dm_iterate_devices_fn iterate_devices; | 156 | dm_iterate_devices_fn iterate_devices; |
157 | dm_io_hints_fn io_hints; | ||
154 | 158 | ||
155 | /* For internal device-mapper use. */ | 159 | /* For internal device-mapper use. */ |
156 | struct list_head list; | 160 | struct list_head list; |
diff --git a/include/linux/dm-log-userspace.h b/include/linux/dm-log-userspace.h index 642e3017b51f..8a1f972c0fe9 100644 --- a/include/linux/dm-log-userspace.h +++ b/include/linux/dm-log-userspace.h | |||
@@ -371,7 +371,18 @@ | |||
371 | (DM_ULOG_REQUEST_MASK & (request_type)) | 371 | (DM_ULOG_REQUEST_MASK & (request_type)) |
372 | 372 | ||
373 | struct dm_ulog_request { | 373 | struct dm_ulog_request { |
374 | char uuid[DM_UUID_LEN]; /* Ties a request to a specific mirror log */ | 374 | /* |
375 | * The local unique identifier (luid) and the universally unique | ||
376 | * identifier (uuid) are used to tie a request to a specific | ||
377 | * mirror log. A single machine log could probably make do with | ||
378 | * just the 'luid', but a cluster-aware log must use the 'uuid' and | ||
379 | * the 'luid'. The uuid is what is required for node to node | ||
380 | * communication concerning a particular log, but the 'luid' helps | ||
381 | * differentiate between logs that are being swapped and have the | ||
382 | * same 'uuid'. (Think "live" and "inactive" device-mapper tables.) | ||
383 | */ | ||
384 | uint64_t luid; | ||
385 | char uuid[DM_UUID_LEN]; | ||
375 | char padding[7]; /* Padding because DM_UUID_LEN = 129 */ | 386 | char padding[7]; /* Padding because DM_UUID_LEN = 129 */ |
376 | 387 | ||
377 | int32_t error; /* Used to report back processing errors */ | 388 | int32_t error; /* Used to report back processing errors */ |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 13e1adf55c4c..6273fa97b527 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -240,6 +240,21 @@ static inline int cancel_delayed_work(struct delayed_work *work) | |||
240 | return ret; | 240 | return ret; |
241 | } | 241 | } |
242 | 242 | ||
243 | /* | ||
244 | * Like above, but uses del_timer() instead of del_timer_sync(). This means, | ||
245 | * if it returns 0 the timer function may be running and the queueing is in | ||
246 | * progress. | ||
247 | */ | ||
248 | static inline int __cancel_delayed_work(struct delayed_work *work) | ||
249 | { | ||
250 | int ret; | ||
251 | |||
252 | ret = del_timer(&work->timer); | ||
253 | if (ret) | ||
254 | work_clear_pending(&work->work); | ||
255 | return ret; | ||
256 | } | ||
257 | |||
243 | extern int cancel_delayed_work_sync(struct delayed_work *work); | 258 | extern int cancel_delayed_work_sync(struct delayed_work *work); |
244 | 259 | ||
245 | /* Obsolete. use cancel_delayed_work_sync() */ | 260 | /* Obsolete. use cancel_delayed_work_sync() */ |
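Unlike cancel_delayed_work(), the new __cancel_delayed_work() uses del_timer() and so never sleeps, at the price of a weaker guarantee: a return of 0 means the timer handler may already be running or the work already queued. A userspace model of that contract is sketched below; it mirrors del_timer() returning nonzero only when it deactivated a still-pending timer, and the names are stand-ins.

#include <stdio.h>

struct dwork_model {
	int timer_armed;                 /* 1 while the timer has not fired */
	int work_pending;
};

static int del_timer_model(struct dwork_model *w)
{
	int was_armed = w->timer_armed;

	w->timer_armed = 0;
	return was_armed;                /* del_timer()'s return convention */
}

static int cancel_model(struct dwork_model *w)
{
	int ret = del_timer_model(w);

	if (ret)
		w->work_pending = 0;     /* mirrors work_clear_pending() */
	return ret;                      /* 0: handler may be running or work queued */
}

int main(void)
{
	struct dwork_model armed = { 1, 1 }, fired = { 0, 1 };

	printf("armed: cancel=%d pending=%d\n", cancel_model(&armed), armed.work_pending);
	printf("fired: cancel=%d pending=%d\n", cancel_model(&fired), fired.work_pending);
	return 0;
}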
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h index 7eafb8d54470..82a3191375f5 100644 --- a/include/net/pkt_sched.h +++ b/include/net/pkt_sched.h | |||
@@ -61,8 +61,8 @@ psched_tdiff_bounded(psched_time_t tv1, psched_time_t tv2, psched_time_t bound) | |||
61 | } | 61 | } |
62 | 62 | ||
63 | struct qdisc_watchdog { | 63 | struct qdisc_watchdog { |
64 | struct tasklet_hrtimer timer; | 64 | struct hrtimer timer; |
65 | struct Qdisc *qdisc; | 65 | struct Qdisc *qdisc; |
66 | }; | 66 | }; |
67 | 67 | ||
68 | extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); | 68 | extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc); |
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index f274e1959885..d7cbc579fc80 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -50,7 +50,7 @@ static atomic_t nr_task_counters __read_mostly; | |||
50 | * 1 - disallow cpu counters to unpriv | 50 | * 1 - disallow cpu counters to unpriv |
51 | * 2 - disallow kernel profiling to unpriv | 51 | * 2 - disallow kernel profiling to unpriv |
52 | */ | 52 | */ |
53 | int sysctl_perf_counter_paranoid __read_mostly; | 53 | int sysctl_perf_counter_paranoid __read_mostly = 1; |
54 | 54 | ||
55 | static inline bool perf_paranoid_cpu(void) | 55 | static inline bool perf_paranoid_cpu(void) |
56 | { | 56 | { |
@@ -4066,6 +4066,7 @@ perf_counter_alloc(struct perf_counter_attr *attr, | |||
4066 | hwc->sample_period = attr->sample_period; | 4066 | hwc->sample_period = attr->sample_period; |
4067 | if (attr->freq && attr->sample_freq) | 4067 | if (attr->freq && attr->sample_freq) |
4068 | hwc->sample_period = 1; | 4068 | hwc->sample_period = 1; |
4069 | hwc->last_period = hwc->sample_period; | ||
4069 | 4070 | ||
4070 | atomic64_set(&hwc->period_left, hwc->sample_period); | 4071 | atomic64_set(&hwc->period_left, hwc->sample_period); |
4071 | 4072 | ||
diff --git a/mm/nommu.c b/mm/nommu.c index 4bde489ec431..66e81e7e9fe9 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -1352,6 +1352,7 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
1352 | } | 1352 | } |
1353 | 1353 | ||
1354 | vma->vm_region = region; | 1354 | vma->vm_region = region; |
1355 | add_nommu_region(region); | ||
1355 | 1356 | ||
1356 | /* set up the mapping */ | 1357 | /* set up the mapping */ |
1357 | if (file && vma->vm_flags & VM_SHARED) | 1358 | if (file && vma->vm_flags & VM_SHARED) |
@@ -1361,8 +1362,6 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
1361 | if (ret < 0) | 1362 | if (ret < 0) |
1362 | goto error_put_region; | 1363 | goto error_put_region; |
1363 | 1364 | ||
1364 | add_nommu_region(region); | ||
1365 | |||
1366 | /* okay... we have a mapping; now we have to register it */ | 1365 | /* okay... we have a mapping; now we have to register it */ |
1367 | result = vma->vm_start; | 1366 | result = vma->vm_start; |
1368 | 1367 | ||
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5cc986eb9f6f..a0de15f46987 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -817,13 +817,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) | |||
817 | * agressive about taking ownership of free pages | 817 | * agressive about taking ownership of free pages |
818 | */ | 818 | */ |
819 | if (unlikely(current_order >= (pageblock_order >> 1)) || | 819 | if (unlikely(current_order >= (pageblock_order >> 1)) || |
820 | start_migratetype == MIGRATE_RECLAIMABLE) { | 820 | start_migratetype == MIGRATE_RECLAIMABLE || |
821 | page_group_by_mobility_disabled) { | ||
821 | unsigned long pages; | 822 | unsigned long pages; |
822 | pages = move_freepages_block(zone, page, | 823 | pages = move_freepages_block(zone, page, |
823 | start_migratetype); | 824 | start_migratetype); |
824 | 825 | ||
825 | /* Claim the whole block if over half of it is free */ | 826 | /* Claim the whole block if over half of it is free */ |
826 | if (pages >= (1 << (pageblock_order-1))) | 827 | if (pages >= (1 << (pageblock_order-1)) || |
828 | page_group_by_mobility_disabled) | ||
827 | set_pageblock_migratetype(page, | 829 | set_pageblock_migratetype(page, |
828 | start_migratetype); | 830 | start_migratetype); |
829 | 831 | ||
diff --git a/mm/percpu.c b/mm/percpu.c index 5fe37842e0ea..3311c8919f37 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -197,7 +197,12 @@ static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk, | |||
197 | static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, | 197 | static bool pcpu_chunk_page_occupied(struct pcpu_chunk *chunk, |
198 | int page_idx) | 198 | int page_idx) |
199 | { | 199 | { |
200 | return *pcpu_chunk_pagep(chunk, 0, page_idx) != NULL; | 200 | /* |
201 | * Any possible cpu id can be used here, so there's no need to | ||
202 | * worry about preemption or cpu hotplug. | ||
203 | */ | ||
204 | return *pcpu_chunk_pagep(chunk, raw_smp_processor_id(), | ||
205 | page_idx) != NULL; | ||
201 | } | 206 | } |
202 | 207 | ||
203 | /* set the pointer to a chunk in a page struct */ | 208 | /* set the pointer to a chunk in a page struct */ |
@@ -297,6 +302,14 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |||
297 | return pcpu_first_chunk; | 302 | return pcpu_first_chunk; |
298 | } | 303 | } |
299 | 304 | ||
305 | /* | ||
306 | * The address is relative to unit0 which might be unused and | ||
307 | * thus unmapped. Offset the address to the unit space of the | ||
308 | * current processor before looking it up in the vmalloc | ||
309 | * space. Note that any possible cpu id can be used here, so | ||
310 | * there's no need to worry about preemption or cpu hotplug. | ||
311 | */ | ||
312 | addr += raw_smp_processor_id() * pcpu_unit_size; | ||
300 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); | 313 | return pcpu_get_page_chunk(vmalloc_to_page(addr)); |
301 | } | 314 | } |
302 | 315 | ||
diff --git a/mm/slub.c b/mm/slub.c | |||
@@ -2594,8 +2594,6 @@ static inline int kmem_cache_close(struct kmem_cache *s) | |||
2594 | */ | 2594 | */ |
2595 | void kmem_cache_destroy(struct kmem_cache *s) | 2595 | void kmem_cache_destroy(struct kmem_cache *s) |
2596 | { | 2596 | { |
2597 | if (s->flags & SLAB_DESTROY_BY_RCU) | ||
2598 | rcu_barrier(); | ||
2599 | down_write(&slub_lock); | 2597 | down_write(&slub_lock); |
2600 | s->refcount--; | 2598 | s->refcount--; |
2601 | if (!s->refcount) { | 2599 | if (!s->refcount) { |
@@ -2606,6 +2604,8 @@ void kmem_cache_destroy(struct kmem_cache *s) | |||
2606 | "still has objects.\n", s->name, __func__); | 2604 | "still has objects.\n", s->name, __func__); |
2607 | dump_stack(); | 2605 | dump_stack(); |
2608 | } | 2606 | } |
2607 | if (s->flags & SLAB_DESTROY_BY_RCU) | ||
2608 | rcu_barrier(); | ||
2609 | sysfs_slab_remove(s); | 2609 | sysfs_slab_remove(s); |
2610 | } else | 2610 | } else |
2611 | up_write(&slub_lock); | 2611 | up_write(&slub_lock); |
diff --git a/net/core/sock.c b/net/core/sock.c index bbb25be7ddfe..76334228ed1c 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1025,6 +1025,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority, | |||
1025 | sk->sk_prot = sk->sk_prot_creator = prot; | 1025 | sk->sk_prot = sk->sk_prot_creator = prot; |
1026 | sock_lock_init(sk); | 1026 | sock_lock_init(sk); |
1027 | sock_net_set(sk, get_net(net)); | 1027 | sock_net_set(sk, get_net(net)); |
1028 | atomic_set(&sk->sk_wmem_alloc, 1); | ||
1028 | } | 1029 | } |
1029 | 1030 | ||
1030 | return sk; | 1031 | return sk; |
@@ -1872,7 +1873,6 @@ void sock_init_data(struct socket *sock, struct sock *sk) | |||
1872 | */ | 1873 | */ |
1873 | smp_wmb(); | 1874 | smp_wmb(); |
1874 | atomic_set(&sk->sk_refcnt, 1); | 1875 | atomic_set(&sk->sk_refcnt, 1); |
1875 | atomic_set(&sk->sk_wmem_alloc, 1); | ||
1876 | atomic_set(&sk->sk_drops, 0); | 1876 | atomic_set(&sk->sk_drops, 0); |
1877 | } | 1877 | } |
1878 | EXPORT_SYMBOL(sock_init_data); | 1878 | EXPORT_SYMBOL(sock_init_data); |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 92e6f3a52c13..fdb694e9f759 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -458,7 +458,7 @@ EXPORT_SYMBOL(qdisc_warn_nonwc); | |||
458 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) | 458 | static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) |
459 | { | 459 | { |
460 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, | 460 | struct qdisc_watchdog *wd = container_of(timer, struct qdisc_watchdog, |
461 | timer.timer); | 461 | timer); |
462 | 462 | ||
463 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; | 463 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; |
464 | __netif_schedule(qdisc_root(wd->qdisc)); | 464 | __netif_schedule(qdisc_root(wd->qdisc)); |
@@ -468,8 +468,8 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer) | |||
468 | 468 | ||
469 | void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) | 469 | void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc) |
470 | { | 470 | { |
471 | tasklet_hrtimer_init(&wd->timer, qdisc_watchdog, | 471 | hrtimer_init(&wd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
472 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 472 | wd->timer.function = qdisc_watchdog; |
473 | wd->qdisc = qdisc; | 473 | wd->qdisc = qdisc; |
474 | } | 474 | } |
475 | EXPORT_SYMBOL(qdisc_watchdog_init); | 475 | EXPORT_SYMBOL(qdisc_watchdog_init); |
@@ -485,13 +485,13 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires) | |||
485 | wd->qdisc->flags |= TCQ_F_THROTTLED; | 485 | wd->qdisc->flags |= TCQ_F_THROTTLED; |
486 | time = ktime_set(0, 0); | 486 | time = ktime_set(0, 0); |
487 | time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); | 487 | time = ktime_add_ns(time, PSCHED_TICKS2NS(expires)); |
488 | tasklet_hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); | 488 | hrtimer_start(&wd->timer, time, HRTIMER_MODE_ABS); |
489 | } | 489 | } |
490 | EXPORT_SYMBOL(qdisc_watchdog_schedule); | 490 | EXPORT_SYMBOL(qdisc_watchdog_schedule); |
491 | 491 | ||
492 | void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) | 492 | void qdisc_watchdog_cancel(struct qdisc_watchdog *wd) |
493 | { | 493 | { |
494 | tasklet_hrtimer_cancel(&wd->timer); | 494 | hrtimer_cancel(&wd->timer); |
495 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; | 495 | wd->qdisc->flags &= ~TCQ_F_THROTTLED; |
496 | } | 496 | } |
497 | EXPORT_SYMBOL(qdisc_watchdog_cancel); | 497 | EXPORT_SYMBOL(qdisc_watchdog_cancel); |
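Both watchdog callbacks recover their private state with container_of() on the timer pointer; with the tasklet_hrtimer wrapper gone, the member path shrinks from timer.timer to timer, and the member named in container_of() must match the embedded field exactly. A small userspace model of the idiom, with stand-in types:

#include <stdio.h>
#include <stddef.h>

/* Minimal container_of(): recover the enclosing structure from a
 * pointer to one of its embedded members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_model { int expires; };

struct watchdog_model {
	struct timer_model timer;        /* embedded member handed to the callback */
	int qdisc_id;
};

static void callback(struct timer_model *t)
{
	struct watchdog_model *wd = container_of(t, struct watchdog_model, timer);

	printf("callback for qdisc %d\n", wd->qdisc_id);
}

int main(void)
{
	struct watchdog_model wd = { .timer = { 0 }, .qdisc_id = 42 };

	callback(&wd.timer);             /* callback only ever sees &wd.timer */
	return 0;
}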
@@ -1456,6 +1456,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, | |||
1456 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); | 1456 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); |
1457 | tcm = NLMSG_DATA(nlh); | 1457 | tcm = NLMSG_DATA(nlh); |
1458 | tcm->tcm_family = AF_UNSPEC; | 1458 | tcm->tcm_family = AF_UNSPEC; |
1459 | tcm->tcm__pad1 = 0; | ||
1460 | tcm->tcm__pad2 = 0; | ||
1459 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; | 1461 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
1460 | tcm->tcm_parent = q->handle; | 1462 | tcm->tcm_parent = q->handle; |
1461 | tcm->tcm_handle = q->handle; | 1463 | tcm->tcm_handle = q->handle; |
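The two added stores zero tcm__pad1 and tcm__pad2 before the message is sent; a common reason for such explicit assignments is that the message buffer is not pre-zeroed, so any field left untouched would carry whatever bytes were there before out to userspace. A userspace illustration with a stand-in structure:

#include <stdio.h>
#include <string.h>

struct tcm_model {                       /* stand-in for struct tcmsg */
	unsigned char family;
	unsigned char pad1;
	unsigned short pad2;
	int ifindex;
};

static void fill(struct tcm_model *m, int clear_pads)
{
	m->family = 0;
	if (clear_pads) {
		m->pad1 = 0;             /* the added assignments */
		m->pad2 = 0;
	}
	m->ifindex = 1;
}

int main(void)
{
	struct tcm_model m;

	memset(&m, 0xAA, sizeof(m));     /* simulate stale buffer contents */
	fill(&m, 0);
	printf("without clearing: pad1=%#x pad2=%#x (stale bytes escape)\n",
	       m.pad1, m.pad2);

	memset(&m, 0xAA, sizeof(m));
	fill(&m, 1);
	printf("with clearing:    pad1=%#x pad2=%#x\n", m.pad1, m.pad2);
	return 0;
}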
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 149b0405c5ec..d5798e17a832 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c | |||
@@ -163,7 +163,7 @@ struct cbq_sched_data | |||
163 | psched_time_t now_rt; /* Cached real time */ | 163 | psched_time_t now_rt; /* Cached real time */ |
164 | unsigned pmask; | 164 | unsigned pmask; |
165 | 165 | ||
166 | struct tasklet_hrtimer delay_timer; | 166 | struct hrtimer delay_timer; |
167 | struct qdisc_watchdog watchdog; /* Watchdog timer, | 167 | struct qdisc_watchdog watchdog; /* Watchdog timer, |
168 | started when CBQ has | 168 | started when CBQ has |
169 | backlog, but cannot | 169 | backlog, but cannot |
@@ -503,8 +503,6 @@ static void cbq_ovl_delay(struct cbq_class *cl) | |||
503 | cl->undertime = q->now + delay; | 503 | cl->undertime = q->now + delay; |
504 | 504 | ||
505 | if (delay > 0) { | 505 | if (delay > 0) { |
506 | struct hrtimer *ht; | ||
507 | |||
508 | sched += delay + cl->penalty; | 506 | sched += delay + cl->penalty; |
509 | cl->penalized = sched; | 507 | cl->penalized = sched; |
510 | cl->cpriority = TC_CBQ_MAXPRIO; | 508 | cl->cpriority = TC_CBQ_MAXPRIO; |
@@ -512,12 +510,12 @@ static void cbq_ovl_delay(struct cbq_class *cl) | |||
512 | 510 | ||
513 | expires = ktime_set(0, 0); | 511 | expires = ktime_set(0, 0); |
514 | expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched)); | 512 | expires = ktime_add_ns(expires, PSCHED_TICKS2NS(sched)); |
515 | ht = &q->delay_timer.timer; | 513 | if (hrtimer_try_to_cancel(&q->delay_timer) && |
516 | if (hrtimer_try_to_cancel(ht) && | 514 | ktime_to_ns(ktime_sub( |
517 | ktime_to_ns(ktime_sub(hrtimer_get_expires(ht), | 515 | hrtimer_get_expires(&q->delay_timer), |
518 | expires)) > 0) | 516 | expires)) > 0) |
519 | hrtimer_set_expires(ht, expires); | 517 | hrtimer_set_expires(&q->delay_timer, expires); |
520 | hrtimer_restart(ht); | 518 | hrtimer_restart(&q->delay_timer); |
521 | cl->delayed = 1; | 519 | cl->delayed = 1; |
522 | cl->xstats.overactions++; | 520 | cl->xstats.overactions++; |
523 | return; | 521 | return; |
@@ -593,7 +591,7 @@ static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio, | |||
593 | static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) | 591 | static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) |
594 | { | 592 | { |
595 | struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, | 593 | struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data, |
596 | delay_timer.timer); | 594 | delay_timer); |
597 | struct Qdisc *sch = q->watchdog.qdisc; | 595 | struct Qdisc *sch = q->watchdog.qdisc; |
598 | psched_time_t now; | 596 | psched_time_t now; |
599 | psched_tdiff_t delay = 0; | 597 | psched_tdiff_t delay = 0; |
@@ -623,7 +621,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer) | |||
623 | 621 | ||
624 | time = ktime_set(0, 0); | 622 | time = ktime_set(0, 0); |
625 | time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); | 623 | time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay)); |
626 | tasklet_hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); | 624 | hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS); |
627 | } | 625 | } |
628 | 626 | ||
629 | sch->flags &= ~TCQ_F_THROTTLED; | 627 | sch->flags &= ~TCQ_F_THROTTLED; |
@@ -1216,7 +1214,7 @@ cbq_reset(struct Qdisc* sch) | |||
1216 | q->tx_class = NULL; | 1214 | q->tx_class = NULL; |
1217 | q->tx_borrowed = NULL; | 1215 | q->tx_borrowed = NULL; |
1218 | qdisc_watchdog_cancel(&q->watchdog); | 1216 | qdisc_watchdog_cancel(&q->watchdog); |
1219 | tasklet_hrtimer_cancel(&q->delay_timer); | 1217 | hrtimer_cancel(&q->delay_timer); |
1220 | q->toplevel = TC_CBQ_MAXLEVEL; | 1218 | q->toplevel = TC_CBQ_MAXLEVEL; |
1221 | q->now = psched_get_time(); | 1219 | q->now = psched_get_time(); |
1222 | q->now_rt = q->now; | 1220 | q->now_rt = q->now; |
@@ -1399,8 +1397,7 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt) | |||
1399 | q->link.minidle = -0x7FFFFFFF; | 1397 | q->link.minidle = -0x7FFFFFFF; |
1400 | 1398 | ||
1401 | qdisc_watchdog_init(&q->watchdog, sch); | 1399 | qdisc_watchdog_init(&q->watchdog, sch); |
1402 | tasklet_hrtimer_init(&q->delay_timer, cbq_undelay, | 1400 | hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
1403 | CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
1404 | q->delay_timer.function = cbq_undelay; | 1401 | q->delay_timer.function = cbq_undelay; |
1405 | q->toplevel = TC_CBQ_MAXLEVEL; | 1402 | q->toplevel = TC_CBQ_MAXLEVEL; |
1406 | q->now = psched_get_time(); | 1403 | q->now = psched_get_time(); |
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c index 4732f5e5d127..b85e61bcf246 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c | |||
@@ -249,7 +249,11 @@ void ima_counts_put(struct path *path, int mask) | |||
249 | struct inode *inode = path->dentry->d_inode; | 249 | struct inode *inode = path->dentry->d_inode; |
250 | struct ima_iint_cache *iint; | 250 | struct ima_iint_cache *iint; |
251 | 251 | ||
252 | if (!ima_initialized || !S_ISREG(inode->i_mode)) | 252 | /* The inode may already have been freed, freeing the iint |
253 | * with it. Verify the inode is not NULL before dereferencing | ||
254 | * it. | ||
255 | */ | ||
256 | if (!ima_initialized || !inode || !S_ISREG(inode->i_mode)) | ||
253 | return; | 257 | return; |
254 | iint = ima_iint_find_insert_get(inode); | 258 | iint = ima_iint_find_insert_get(inode); |
255 | if (!iint) | 259 | if (!iint) |
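Note: the IMA hunk simply refuses to dereference an inode pointer that may already be NULL because the inode (and its iint) can have been freed. A minimal sketch of that guard, with a hypothetical my_counts_put() standing in for the real function.

#include <linux/fs.h>
#include <linux/path.h>

static void my_counts_put(struct path *path)
{
	struct inode *inode = path->dentry->d_inode;

	/* the inode may already be gone; check before touching i_mode */
	if (!inode || !S_ISREG(inode->i_mode))
		return;

	/* ... drop the measurement counters for this regular file ... */
}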
diff --git a/sound/pci/oxygen/oxygen_lib.c b/sound/pci/oxygen/oxygen_lib.c index 312251d39696..9a8936e20744 100644 --- a/sound/pci/oxygen/oxygen_lib.c +++ b/sound/pci/oxygen/oxygen_lib.c | |||
@@ -260,6 +260,9 @@ oxygen_search_pci_id(struct oxygen *chip, const struct pci_device_id ids[]) | |||
260 | * chip didn't if the first EEPROM word was overwritten. | 260 | * chip didn't if the first EEPROM word was overwritten. |
261 | */ | 261 | */ |
262 | subdevice = oxygen_read_eeprom(chip, 2); | 262 | subdevice = oxygen_read_eeprom(chip, 2); |
263 | /* use default ID if EEPROM is missing */ | ||
264 | if (subdevice == 0xffff) | ||
265 | subdevice = 0x8788; | ||
263 | /* | 266 | /* |
264 | * We use only the subsystem device ID for searching because it is | 267 | * We use only the subsystem device ID for searching because it is |
265 | * unique even without the subsystem vendor ID, which may have been | 268 | * unique even without the subsystem vendor ID, which may have been |
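Note: the oxygen_lib.c hunk falls back to a default subsystem device ID when EEPROM word 2 reads as all-ones, i.e. when the EEPROM is erased or absent. A sketch of that fallback, assuming the caller has already read word 2; OXYGEN_DEFAULT_SUBDEVICE is a placeholder macro, with the value 0x8788 taken from the hunk above.

#include <linux/types.h>

#define OXYGEN_DEFAULT_SUBDEVICE 0x8788	/* default ID used by the hunk above */

static u16 effective_subdevice(u16 eeprom_word2)
{
	/* an erased or missing EEPROM reads back as all-ones */
	if (eeprom_word2 == 0xffff)
		return OXYGEN_DEFAULT_SUBDEVICE;
	return eeprom_word2;
}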
diff --git a/sound/pci/oxygen/oxygen_pcm.c b/sound/pci/oxygen/oxygen_pcm.c index 3b5ca70c9d4d..ef2345d82b86 100644 --- a/sound/pci/oxygen/oxygen_pcm.c +++ b/sound/pci/oxygen/oxygen_pcm.c | |||
@@ -469,9 +469,11 @@ static int oxygen_multich_hw_params(struct snd_pcm_substream *substream, | |||
469 | oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT, | 469 | oxygen_write16_masked(chip, OXYGEN_I2S_MULTICH_FORMAT, |
470 | oxygen_rate(hw_params) | | 470 | oxygen_rate(hw_params) | |
471 | chip->model.dac_i2s_format | | 471 | chip->model.dac_i2s_format | |
472 | oxygen_i2s_mclk(hw_params) | | ||
472 | oxygen_i2s_bits(hw_params), | 473 | oxygen_i2s_bits(hw_params), |
473 | OXYGEN_I2S_RATE_MASK | | 474 | OXYGEN_I2S_RATE_MASK | |
474 | OXYGEN_I2S_FORMAT_MASK | | 475 | OXYGEN_I2S_FORMAT_MASK | |
476 | OXYGEN_I2S_MCLK_MASK | | ||
475 | OXYGEN_I2S_BITS_MASK); | 477 | OXYGEN_I2S_BITS_MASK); |
476 | oxygen_update_dac_routing(chip); | 478 | oxygen_update_dac_routing(chip); |
477 | oxygen_update_spdif_source(chip); | 479 | oxygen_update_spdif_source(chip); |
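Note: the oxygen_pcm.c hunk adds the MCLK bits to both the value and the mask of the masked register write; with a masked write, a field only changes when its bits appear in the mask, so the multichannel MCLK setting was previously never updated. A generic sketch of the read-modify-write idiom presumably behind oxygen_write16_masked(); apply_masked16() is illustrative, not a driver symbol.

#include <linux/types.h>

static inline u16 apply_masked16(u16 old, u16 value, u16 mask)
{
	/* clear the masked field, then insert the new bits; bits of
	 * 'value' outside 'mask' are silently dropped
	 */
	return (old & ~mask) | (value & mask);
}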