-rw-r--r--  arch/powerpc/kernel/iommu.c    |  2
-rw-r--r--  arch/powerpc/kernel/sysfs.c    | 18
-rw-r--r--  arch/powerpc/kernel/tm.S       | 95
-rw-r--r--  arch/powerpc/kernel/vio.c      | 12
-rw-r--r--  arch/powerpc/lib/checksum_64.S | 58
-rw-r--r--  arch/powerpc/mm/init_64.c      |  4
-rw-r--r--  arch/powerpc/mm/mem.c          |  9
-rw-r--r--  arch/powerpc/perf/power8-pmu.c |  5
-rw-r--r--  mm/Kconfig                     |  2
9 files changed, 148 insertions(+), 57 deletions(-)
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0adab06ce5c0..572bb5b95f35 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -661,7 +661,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 	/* number of bytes needed for the bitmap */
 	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 
-	page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz));
+	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 	if (!page)
 		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 	tbl->it_map = page_address(page);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 27a90b99ef67..b4e667663d9b 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -17,6 +17,7 @@
 #include <asm/machdep.h>
 #include <asm/smp.h>
 #include <asm/pmc.h>
+#include <asm/firmware.h>
 
 #include "cacheinfo.h"
 
@@ -179,15 +180,25 @@ SYSFS_PMCSETUP(spurr, SPRN_SPURR);
 SYSFS_PMCSETUP(dscr, SPRN_DSCR);
 SYSFS_PMCSETUP(pir, SPRN_PIR);
 
+/*
+  Lets only enable read for phyp resources and
+  enable write when needed with a separate function.
+  Lets be conservative and default to pseries.
+*/
 static DEVICE_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
 static DEVICE_ATTR(spurr, 0400, show_spurr, NULL);
 static DEVICE_ATTR(dscr, 0600, show_dscr, store_dscr);
-static DEVICE_ATTR(purr, 0600, show_purr, store_purr);
+static DEVICE_ATTR(purr, 0400, show_purr, store_purr);
 static DEVICE_ATTR(pir, 0400, show_pir, NULL);
 
 unsigned long dscr_default = 0;
 EXPORT_SYMBOL(dscr_default);
 
+static void add_write_permission_dev_attr(struct device_attribute *attr)
+{
+	attr->attr.mode |= 0200;
+}
+
 static ssize_t show_dscr_default(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -394,8 +405,11 @@ static void register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_MMCRA))
 		device_create_file(s, &dev_attr_mmcra);
 
-	if (cpu_has_feature(CPU_FTR_PURR))
+	if (cpu_has_feature(CPU_FTR_PURR)) {
+		if (!firmware_has_feature(FW_FEATURE_LPAR))
+			add_write_permission_dev_attr(&dev_attr_purr);
 		device_create_file(s, &dev_attr_purr);
+	}
 
 	if (cpu_has_feature(CPU_FTR_SPURR))
 		device_create_file(s, &dev_attr_spurr);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 7b60b9851469..cd809eaa8b5c 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -79,6 +79,11 @@ _GLOBAL(tm_abort)
 	TABORT(R3)
 	blr
 
+	.section	".toc","aw"
+DSCR_DEFAULT:
+	.tc dscr_default[TC],dscr_default
+
+	.section	".text"
 
 /* void tm_reclaim(struct thread_struct *thread,
  *		   unsigned long orig_msr,
@@ -123,6 +128,7 @@ _GLOBAL(tm_reclaim)
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
 	li	r16, MSR_RI
+	ori	r16, r16, MSR_EE	/* IRQs hard off */
 	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
@@ -187,11 +193,18 @@ dont_backup_fp:
 	std	r1, PACATMSCRATCH(r13)
 	ld	r1, PACAR1(r13)
 
+	/* Store the PPR in r11 and reset to decent value */
+	std	r11, GPR11(r1)			/* Temporary stash */
+	mfspr	r11, SPRN_PPR
+	HMT_MEDIUM
+
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''   '' */
 	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
 
+	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */
+
 	addi	r7, r12, PT_CKPT_REGS		/* Thread's ckpt_regs */
 
 	/* Make r7 look like an exception frame so that we
@@ -203,15 +216,19 @@ dont_backup_fp:
 	SAVE_GPR(0, r7)				/* user r0 */
 	SAVE_GPR(2, r7)				/* user r2 */
 	SAVE_4GPRS(3, r7)			/* user r3-r6 */
-	SAVE_4GPRS(8, r7)			/* user r8-r11 */
+	SAVE_GPR(8, r7)				/* user r8 */
+	SAVE_GPR(9, r7)				/* user r9 */
+	SAVE_GPR(10, r7)			/* user r10 */
 	ld	r3, PACATMSCRATCH(r13)		/* user r1 */
 	ld	r4, GPR7(r1)			/* user r7 */
-	ld	r5, GPR12(r1)			/* user r12 */
-	GET_SCRATCH0(6)				/* user r13 */
+	ld	r5, GPR11(r1)			/* user r11 */
+	ld	r6, GPR12(r1)			/* user r12 */
+	GET_SCRATCH0(8)				/* user r13 */
 	std	r3, GPR1(r7)
 	std	r4, GPR7(r7)
-	std	r5, GPR12(r7)
-	std	r6, GPR13(r7)
+	std	r5, GPR11(r7)
+	std	r6, GPR12(r7)
+	std	r8, GPR13(r7)
 
 	SAVE_NVGPRS(r7)				/* user r14-r31 */
 
@@ -234,14 +251,12 @@ dont_backup_fp:
 	std	r6, _XER(r7)
 
 
-	/* ******************** TAR, PPR, DSCR ********** */
+	/* ******************** TAR, DSCR ********** */
 	mfspr	r3, SPRN_TAR
-	mfspr	r4, SPRN_PPR
-	mfspr	r5, SPRN_DSCR
+	mfspr	r4, SPRN_DSCR
 
 	std	r3, THREAD_TM_TAR(r12)
-	std	r4, THREAD_TM_PPR(r12)
-	std	r5, THREAD_TM_DSCR(r12)
+	std	r4, THREAD_TM_DSCR(r12)
 
 	/* MSR and flags: We don't change CRs, and we don't need to alter
 	 * MSR.
@@ -258,7 +273,7 @@ dont_backup_fp:
 	std	r3, THREAD_TM_TFHAR(r12)
 	std	r4, THREAD_TM_TFIAR(r12)
 
-	/* AMR and PPR are checkpointed too, but are unsupported by Linux. */
+	/* AMR is checkpointed too, but is unsupported by Linux. */
 
 	/* Restore original MSR/IRQ state & clear TM mode */
 	ld	r14, TM_FRAME_L0(r1)		/* Orig MSR */
@@ -274,6 +289,12 @@ dont_backup_fp:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 
@@ -358,25 +379,24 @@ dont_restore_fp:
 
 restore_gprs:
 
-	/* ******************** TAR, PPR, DSCR ********** */
-	ld	r4, THREAD_TM_TAR(r3)
-	ld	r5, THREAD_TM_PPR(r3)
-	ld	r6, THREAD_TM_DSCR(r3)
+	/* ******************** CR,LR,CCR,MSR ********** */
+	ld	r4, _CTR(r7)
+	ld	r5, _LINK(r7)
+	ld	r6, _CCR(r7)
+	ld	r8, _XER(r7)
 
-	mtspr	SPRN_TAR, r4
-	mtspr	SPRN_PPR, r5
-	mtspr	SPRN_DSCR, r6
+	mtctr	r4
+	mtlr	r5
+	mtcr	r6
+	mtxer	r8
 
-	/* ******************** CR,LR,CCR,MSR ********** */
-	ld	r3, _CTR(r7)
-	ld	r4, _LINK(r7)
-	ld	r5, _CCR(r7)
-	ld	r6, _XER(r7)
+	/* ******************** TAR ******************** */
+	ld	r4, THREAD_TM_TAR(r3)
+	mtspr	SPRN_TAR, r4
 
-	mtctr	r3
-	mtlr	r4
-	mtcr	r5
-	mtxer	r6
+	/* Load up the PPR and DSCR in GPRs only at this stage */
+	ld	r5, THREAD_TM_DSCR(r3)
+	ld	r6, THREAD_TM_PPR(r3)
 
 	/* Clear the MSR RI since we are about to change R1. EE is already off
 	 */
@@ -384,19 +404,26 @@ restore_gprs:
 	mtmsrd	r4, 1
 
 	REST_4GPRS(0, r7)			/* GPR0-3 */
-	REST_GPR(4, r7)				/* GPR4-6 */
-	REST_GPR(5, r7)
-	REST_GPR(6, r7)
+	REST_GPR(4, r7)				/* GPR4 */
 	REST_4GPRS(8, r7)			/* GPR8-11 */
 	REST_2GPRS(12, r7)			/* GPR12-13 */
 
 	REST_NVGPRS(r7)				/* GPR14-31 */
 
-	ld	r7, GPR7(r7)			/* GPR7 */
+	/* Load up PPR and DSCR here so we don't run with user values for long
+	 */
+	mtspr	SPRN_DSCR, r5
+	mtspr	SPRN_PPR, r6
+
+	REST_GPR(5, r7)				/* GPR5-7 */
+	REST_GPR(6, r7)
+	ld	r7, GPR7(r7)
 
 	/* Commit register state as checkpointed state: */
 	TRECHKPT
 
+	HMT_MEDIUM
+
 	/* Our transactional state has now changed.
 	 *
 	 * Now just get out of here.  Transactional (current) state will be
@@ -419,6 +446,12 @@ restore_gprs:
 	mtcr	r4
 	mtlr	r0
 	ld	r2, 40(r1)
+
+	/* Load system default DSCR */
+	ld	r4, DSCR_DEFAULT@toc(r2)
+	ld	r0, 0(r4)
+	mtspr	SPRN_DSCR, r0
+
 	blr
 
 	/* ****************************************************************** */
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 78a350670de3..d38cc08b16c7 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -1530,11 +1530,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 	const char *cp;
 
 	dn = dev->of_node;
-	if (!dn)
-		return -ENODEV;
+	if (!dn) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 	cp = of_get_property(dn, "compatible", NULL);
-	if (!cp)
-		return -ENODEV;
+	if (!cp) {
+		strcat(buf, "\n");
+		return strlen(buf);
+	}
 
 	return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp);
 }
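For context on the vio.c hunk, a minimal userspace sketch of the behavioural change (not kernel code): a sysfs show() routine that returns a negative errno makes the read fail, whereas returning the length of a buffer ending in "\n" gives userspace an empty but successful modalias. The "vdevice" type string below is made up for illustration.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    /* Old behaviour: missing OF node / "compatible" property -> read() fails. */
    static ssize_t modalias_show_old(char *buf, const char *compat)
    {
            if (!compat)
                    return -ENODEV;
            return sprintf(buf, "vio:T%sS%s\n", "vdevice", compat);
    }

    /* New behaviour: report an empty modalias as a successful, newline-only read. */
    static ssize_t modalias_show_new(char *buf, const char *compat)
    {
            if (!compat) {
                    strcat(buf, "\n");
                    return strlen(buf);
            }
            return sprintf(buf, "vio:T%sS%s\n", "vdevice", compat);
    }

    int main(void)
    {
            char buf[64] = "";

            printf("old return: %zd\n", modalias_show_old(buf, NULL)); /* -19 (-ENODEV) */
            printf("new return: %zd\n", modalias_show_new(buf, NULL)); /* 1, i.e. "\n" */
            return 0;
    }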
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S
index 167f72555d60..57a072065057 100644
--- a/arch/powerpc/lib/checksum_64.S
+++ b/arch/powerpc/lib/checksum_64.S
@@ -226,19 +226,35 @@ _GLOBAL(csum_partial)
 	blr
 
 
-	.macro source
+	.macro srcnr
 100:
 	.section __ex_table,"a"
 	.align 3
-	.llong 100b,.Lsrc_error
+	.llong 100b,.Lsrc_error_nr
 	.previous
 	.endm
 
-	.macro dest
+	.macro source
+150:
+	.section __ex_table,"a"
+	.align 3
+	.llong 150b,.Lsrc_error
+	.previous
+	.endm
+
+	.macro dstnr
 200:
 	.section __ex_table,"a"
 	.align 3
-	.llong 200b,.Ldest_error
+	.llong 200b,.Ldest_error_nr
+	.previous
+	.endm
+
+	.macro dest
+250:
+	.section __ex_table,"a"
+	.align 3
+	.llong 250b,.Ldest_error
 	.previous
 	.endm
 
@@ -269,16 +285,16 @@ _GLOBAL(csum_partial_copy_generic)
 	rldicl. r6,r3,64-1,64-2		/* r6 = (r3 & 0x3) >> 1 */
 	beq	.Lcopy_aligned
 
-	li	r7,4
-	sub	r6,r7,r6
+	li	r9,4
+	sub	r6,r9,r6
 	mtctr	r6
 
1:
-source;	lhz	r6,0(r3)		/* align to doubleword */
+srcnr;	lhz	r6,0(r3)		/* align to doubleword */
 	subi	r5,r5,2
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	bdnz	1b
 
@@ -392,10 +408,10 @@ dest; std r16,56(r4)
 
 	mtctr	r6
3:
-source;	ld	r6,0(r3)
+srcnr;	ld	r6,0(r3)
 	addi	r3,r3,8
 	adde	r0,r0,r6
-dest;	std	r6,0(r4)
+dstnr;	std	r6,0(r4)
 	addi	r4,r4,8
 	bdnz	3b
 
@@ -405,10 +421,10 @@ dest; std r6,0(r4)
 	srdi.	r6,r5,2
 	beq	.Lcopy_tail_halfword
 
-source;	lwz	r6,0(r3)
+srcnr;	lwz	r6,0(r3)
 	addi	r3,r3,4
 	adde	r0,r0,r6
-dest;	stw	r6,0(r4)
+dstnr;	stw	r6,0(r4)
 	addi	r4,r4,4
 	subi	r5,r5,4
 
@@ -416,10 +432,10 @@ dest; stw r6,0(r4)
 	srdi.	r6,r5,1
 	beq	.Lcopy_tail_byte
 
-source;	lhz	r6,0(r3)
+srcnr;	lhz	r6,0(r3)
 	addi	r3,r3,2
 	adde	r0,r0,r6
-dest;	sth	r6,0(r4)
+dstnr;	sth	r6,0(r4)
 	addi	r4,r4,2
 	subi	r5,r5,2
 
@@ -427,10 +443,10 @@ dest; sth r6,0(r4)
 	andi.	r6,r5,1
 	beq	.Lcopy_finish
 
-source;	lbz	r6,0(r3)
+srcnr;	lbz	r6,0(r3)
 	sldi	r9,r6,8			/* Pad the byte out to 16 bits */
 	adde	r0,r0,r9
-dest;	stb	r6,0(r4)
+dstnr;	stb	r6,0(r4)
 
.Lcopy_finish:
 	addze	r0,r0			/* add in final carry */
@@ -440,6 +456,11 @@ dest; stb r6,0(r4)
 	blr
 
.Lsrc_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Lsrc_error_nr:
 	cmpdi	0,r7,0
 	beqlr
 	li	r6,-EFAULT
@@ -447,6 +468,11 @@ dest; stb r6,0(r4)
 	blr
 
.Ldest_error:
+	ld	r14,STK_REG(R14)(r1)
+	ld	r15,STK_REG(R15)(r1)
+	ld	r16,STK_REG(R16)(r1)
+	addi	r1,r1,STACKFRAMESIZE
+.Ldest_error_nr:
 	cmpdi	0,r8,0
 	beqlr
 	li	r6,-EFAULT
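The checksum_64.S change splits each fault label in two: faults taken in the unrolled middle loop, which has allocated a stack frame and saved r14-r16, unwind through .Lsrc_error/.Ldest_error, while faults in the head and tail copies branch to the new _nr variants and skip the restore. A loose C analogue of that two-exit pattern, with invented names (the real code reports -EFAULT through the caller's error pointer rather than the return value), might look like:

    #include <stdio.h>

    static int copy_like_csum(int fault_in_head, int fault_in_main_loop)
    {
            int frame_set_up = 0;

            if (fault_in_head)
                    goto err_nr;            /* nothing saved yet: skip the restore */

            frame_set_up = 1;               /* analogue of saving r14-r16 on the stack */

            if (fault_in_main_loop)
                    goto err;               /* must tear the frame down first */

            frame_set_up = 0;               /* normal exit restores the frame too */
            return 0;

    err:
            frame_set_up = 0;               /* analogue of ld r14-r16 + addi r1,r1,STACKFRAMESIZE */
    err_nr:
            (void)frame_set_up;
            return -1;
    }

    int main(void)
    {
            printf("%d %d %d\n", copy_like_csum(0, 0), copy_like_csum(1, 0),
                   copy_like_csum(0, 1));
            return 0;
    }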
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d0cd9e4c6837..8ed035d2edb5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -300,5 +300,9 @@ void vmemmap_free(unsigned long start, unsigned long end)
 {
 }
 
+void register_page_bootmem_memmap(unsigned long section_nr,
+				  struct page *start_page, unsigned long size)
+{
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 1cf9c5b67f24..3fa93dc7fe75 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -297,12 +297,21 @@ void __init paging_init(void)
 }
 #endif /* ! CONFIG_NEED_MULTIPLE_NODES */
 
+static void __init register_page_bootmem_info(void)
+{
+	int i;
+
+	for_each_online_node(i)
+		register_page_bootmem_info_node(NODE_DATA(i));
+}
+
 void __init mem_init(void)
 {
 #ifdef CONFIG_SWIOTLB
 	swiotlb_init(0);
 #endif
 
+	register_page_bootmem_info();
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	set_max_mapnr(max_pfn);
 	free_all_bootmem();
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 2ee4a707f0df..a3f7abd2f13f 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -199,6 +199,7 @@
 #define MMCR1_UNIT_SHIFT(pmc)		(60 - (4 * ((pmc) - 1)))
 #define MMCR1_COMBINE_SHIFT(pmc)	(35 - ((pmc) - 1))
 #define MMCR1_PMCSEL_SHIFT(pmc)		(24 - (((pmc) - 1)) * 8)
+#define MMCR1_FAB_SHIFT			36
 #define MMCR1_DC_QUAL_SHIFT		47
 #define MMCR1_IC_QUAL_SHIFT		46
 
@@ -388,8 +389,8 @@ static int power8_compute_mmcr(u64 event[], int n_ev,
		 * the threshold bits are used for the match value.
		 */
		if (event_is_fab_match(event[i])) {
-			mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
-				  EVENT_THR_CTL_MASK;
+			mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
+				  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
		} else {
			val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
			mmcra |= val << MMCRA_THR_CTL_SHIFT;
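A small standalone sketch of the arithmetic the power8-pmu.c hunk corrects: before the change the FAB match bits were OR-ed into MMCR1 unshifted, afterwards they land at MMCR1_FAB_SHIFT. The EVENT_THR_CTL_SHIFT/EVENT_THR_CTL_MASK values (32 and 0xff) are assumed from the rest of power8-pmu.c, which this hunk does not show, and the event value is made up.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_THR_CTL_SHIFT 32          /* assumed from power8-pmu.c */
    #define EVENT_THR_CTL_MASK  0xffull     /* assumed from power8-pmu.c */
    #define MMCR1_FAB_SHIFT     36          /* added by the hunk above */

    int main(void)
    {
            uint64_t event = 0x5ull << EVENT_THR_CTL_SHIFT; /* made-up FAB match value */
            uint64_t fab   = (event >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;

            printf("old placement: 0x%016" PRIx64 "\n", fab);                    /* bits 0-7 */
            printf("new placement: 0x%016" PRIx64 "\n", fab << MMCR1_FAB_SHIFT); /* bits 36-43 */
            return 0;
    }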
diff --git a/mm/Kconfig b/mm/Kconfig
index 026771a9b097..394838f489eb 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -183,7 +183,7 @@ config MEMORY_HOTPLUG_SPARSE
 config MEMORY_HOTREMOVE
 	bool "Allow for memory hot remove"
 	select MEMORY_ISOLATION
-	select HAVE_BOOTMEM_INFO_NODE if X86_64
+	select HAVE_BOOTMEM_INFO_NODE if (X86_64 || PPC64)
 	depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
 	depends on MIGRATION
 