Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dma.c             |   8
-rw-r--r--  arch/powerpc/kernel/eeh.c             |  19
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c      |  12
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c          |  10
-rw-r--r--  arch/powerpc/kernel/eeh_sysfs.c       |   2
-rw-r--r--  arch/powerpc/kernel/entry_64.S        |   6
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  |   5
-rw-r--r--  arch/powerpc/kernel/fadump.c          | 114
-rw-r--r--  arch/powerpc/kernel/irq.c             |   2
-rw-r--r--  arch/powerpc/kernel/misc.S            |   4
-rw-r--r--  arch/powerpc/kernel/pci_64.c          |  10
-rw-r--r--  arch/powerpc/kernel/ppc_ksyms.c       |   2
-rw-r--r--  arch/powerpc/kernel/process.c         |   2
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c        |  30
-rw-r--r--  arch/powerpc/kernel/setup_64.c        |  32
-rw-r--r--  arch/powerpc/kernel/stacktrace.c      |   2
-rw-r--r--  arch/powerpc/kernel/vdso32/getcpu.S   |   4
17 files changed, 138 insertions(+), 126 deletions(-)
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index adac9dc54aee..484b2d4462c1 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -53,9 +53,16 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 #else
 	struct page *page;
 	int node = dev_to_node(dev);
+#ifdef CONFIG_FSL_SOC
 	u64 pfn = get_pfn_limit(dev);
 	int zone;
 
+	/*
+	 * This code should be OK on other platforms, but we have drivers that
+	 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
+	 * whole routine needs some serious cleanup.
+	 */
+
 	zone = dma_pfn_limit_to_zone(pfn);
 	if (zone < 0) {
 		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
@@ -73,6 +80,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
 		break;
 #endif
 	};
+#endif /* CONFIG_FSL_SOC */
 
 	/* ignore region specifiers */
 	flag &= ~(__GFP_HIGHMEM);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index d543e4179c18..2248a1999c64 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -257,6 +257,13 @@ static void *eeh_dump_pe_log(void *data, void *flag)
 	struct eeh_dev *edev, *tmp;
 	size_t *plen = flag;
 
+	/* If the PE's config space is blocked, 0xFF's will be
+	 * returned. It's pointless to collect the log in this
+	 * case.
+	 */
+	if (pe->state & EEH_PE_CFG_BLOCKED)
+		return NULL;
+
 	eeh_pe_for_each_dev(pe, edev, tmp)
 		*plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
 					  EEH_PCI_REGS_LOG_LEN - *plen);
@@ -673,18 +680,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
 	switch (state) {
 	case pcie_deassert_reset:
 		eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
-		eeh_pe_state_clear(pe, EEH_PE_RESET);
+		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 		break;
 	case pcie_hot_reset:
-		eeh_pe_state_mark(pe, EEH_PE_RESET);
+		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		eeh_ops->reset(pe, EEH_RESET_HOT);
 		break;
 	case pcie_warm_reset:
-		eeh_pe_state_mark(pe, EEH_PE_RESET);
+		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
 		break;
 	default:
-		eeh_pe_state_clear(pe, EEH_PE_RESET);
+		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 		return -EINVAL;
 	};
 
@@ -1523,7 +1530,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
 	switch (option) {
 	case EEH_RESET_DEACTIVATE:
 		ret = eeh_ops->reset(pe, option);
-		eeh_pe_state_clear(pe, EEH_PE_RESET);
+		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 		if (ret)
 			break;
 
@@ -1538,7 +1545,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
 		 */
 		eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
 
-		eeh_pe_state_mark(pe, EEH_PE_RESET);
+		eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 		ret = eeh_ops->reset(pe, option);
 		break;
 	default:
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 3fd514f8e4b2..6535936bdf27 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,13 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
 	/* Issue reset */
-	eeh_pe_state_mark(pe, EEH_PE_RESET);
+	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	ret = eeh_reset_pe(pe);
 	if (ret) {
-		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET);
+		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
 		return ret;
 	}
-	eeh_pe_state_clear(pe, EEH_PE_RESET);
+	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Unfreeze the PE */
 	ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,10 +601,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	 * config accesses. So we prefer to block them. However, controlled
 	 * PCI config accesses initiated from EEH itself are allowed.
 	 */
-	eeh_pe_state_mark(pe, EEH_PE_RESET);
+	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
 	rc = eeh_reset_pe(pe);
 	if (rc) {
-		eeh_pe_state_clear(pe, EEH_PE_RESET);
+		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 		return rc;
 	}
 
@@ -613,7 +613,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
 	/* Restore PE */
 	eeh_ops->configure_bridge(pe);
 	eeh_pe_restore_bars(pe);
-	eeh_pe_state_clear(pe, EEH_PE_RESET);
+	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
 
 	/* Clear frozen state */
 	rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 53dd0915e690..5a63e2b0f65b 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -525,7 +525,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
 	pe->state |= state;
 
 	/* Offline PCI devices if applicable */
-	if (state != EEH_PE_ISOLATED)
+	if (!(state & EEH_PE_ISOLATED))
 		return NULL;
 
 	eeh_pe_for_each_dev(pe, edev, tmp) {
@@ -534,6 +534,10 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
 			pdev->error_state = pci_channel_io_frozen;
 	}
 
+	/* Block PCI config access if required */
+	if (pe->state & EEH_PE_CFG_RESTRICTED)
+		pe->state |= EEH_PE_CFG_BLOCKED;
+
 	return NULL;
 }
 
@@ -611,6 +615,10 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
 			pdev->error_state = pci_channel_io_normal;
 	}
 
+	/* Unblock PCI config access if required */
+	if (pe->state & EEH_PE_CFG_RESTRICTED)
+		pe->state &= ~EEH_PE_CFG_BLOCKED;
+
 	return NULL;
 }
 
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index f19b1e5cb060..1ceecdda810b 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
 		return -ENODEV;
 
 	state = eeh_ops->get_state(edev->pe, NULL);
-	return sprintf(buf, "%0x08x %0x08x\n",
+	return sprintf(buf, "0x%08x 0x%08x\n",
 		       state, edev->pe->state);
 }
 
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5bbd1bc8c3b0..0905c8da90f1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
 3:
 #endif
 	bl	save_nvgprs
+	/*
+	 * Use a non volatile GPR to save and restore our thread_info flags
+	 * across the call to restore_interrupts.
+	 */
+	mr	r30,r4
 	bl	restore_interrupts
+	mr	r4,r30
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	do_notify_resume
 	b	ret_from_except
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 050f79a4a168..72e783ea0681 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1270,11 +1270,6 @@ hmi_exception_early:
 	addi	r3,r1,STACK_FRAME_OVERHEAD
 	bl	hmi_exception_realmode
 	/* Windup the stack. */
-	/* Clear MSR_RI before setting SRR0 and SRR1. */
-	li	r0,MSR_RI
-	mfmsr	r9		/* get MSR value */
-	andc	r9,r9,r0
-	mtmsrd	r9,1		/* Clear MSR_RI */
 	/* Move original HSRR0 and HSRR1 into the respective regs */
 	ld	r9,_MSR(r1)
 	mtspr	SPRN_HSRR1,r9
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 742694c1d852..26d091a1a54c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
 	const __be32 *sections;
 	int i, num_sections;
 	int size;
-	const int *token;
+	const __be32 *token;
 
 	if (depth != 1 || strcmp(uname, "rtas") != 0)
 		return 0;
@@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
 		return 1;
 
 	fw_dump.fadump_supported = 1;
-	fw_dump.ibm_configure_kernel_dump = *token;
+	fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);
 
 	/*
 	 * The 'ibm,kernel-dump' rtas node is present only if there is
@@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
 	memset(fdm, 0, sizeof(struct fadump_mem_struct));
 	addr = addr & PAGE_MASK;
 
-	fdm->header.dump_format_version = 0x00000001;
-	fdm->header.dump_num_sections = 3;
+	fdm->header.dump_format_version = cpu_to_be32(0x00000001);
+	fdm->header.dump_num_sections = cpu_to_be16(3);
 	fdm->header.dump_status_flag = 0;
 	fdm->header.offset_first_dump_section =
-		(u32)offsetof(struct fadump_mem_struct, cpu_state_data);
+		cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));
 
 	/*
 	 * Fields for disk dump option.
@@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
 
 	/* Kernel dump sections */
 	/* cpu state data section. */
-	fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
-	fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
+	fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+	fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
 	fdm->cpu_state_data.source_address = 0;
-	fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
-	fdm->cpu_state_data.destination_address = addr;
+	fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
+	fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
 	addr += fw_dump.cpu_state_data_size;
 
 	/* hpte region section */
-	fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
-	fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
+	fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+	fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
 	fdm->hpte_region.source_address = 0;
-	fdm->hpte_region.source_len = fw_dump.hpte_region_size;
-	fdm->hpte_region.destination_address = addr;
+	fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
+	fdm->hpte_region.destination_address = cpu_to_be64(addr);
 	addr += fw_dump.hpte_region_size;
 
 	/* RMA region section */
-	fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
-	fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
-	fdm->rmr_region.source_address = RMA_START;
-	fdm->rmr_region.source_len = fw_dump.boot_memory_size;
-	fdm->rmr_region.destination_address = addr;
+	fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
+	fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
+	fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
+	fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
+	fdm->rmr_region.destination_address = cpu_to_be64(addr);
 	addr += fw_dump.boot_memory_size;
 
 	return addr;
@@ -272,7 +272,7 @@ int __init fadump_reserve_mem(void)
 	 * first kernel.
 	 */
 	if (fdm_active)
-		fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
+		fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
 	else
 		fw_dump.boot_memory_size = fadump_calculate_reserve_size();
 
@@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void)
 			(unsigned long)(base >> 20));
 
 		fw_dump.fadumphdr_addr =
-				fdm_active->rmr_region.destination_address +
-				fdm_active->rmr_region.source_len;
+				be64_to_cpu(fdm_active->rmr_region.destination_address) +
+				be64_to_cpu(fdm_active->rmr_region.source_len);
 		pr_debug("fadumphdr_addr = %p\n",
 				(void *) fw_dump.fadumphdr_addr);
 	} else {
@@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
 {
 	memset(regs, 0, sizeof(struct pt_regs));
 
-	while (reg_entry->reg_id != REG_ID("CPUEND")) {
-		fadump_set_regval(regs, reg_entry->reg_id,
-					reg_entry->reg_value);
+	while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
+		fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
+					be64_to_cpu(reg_entry->reg_value));
 		reg_entry++;
 	}
 	reg_entry++;
@@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
 	if (!fdm->cpu_state_data.bytes_dumped)
 		return -EINVAL;
 
-	addr = fdm->cpu_state_data.destination_address;
+	addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
 	vaddr = __va(addr);
 
 	reg_header = vaddr;
-	if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
+	if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
 		printk(KERN_ERR "Unable to read register save area.\n");
 		return -ENOENT;
 	}
 	pr_debug("--------CPU State Data------------\n");
-	pr_debug("Magic Number: %llx\n", reg_header->magic_number);
-	pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);
+	pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
+	pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
 
-	vaddr += reg_header->num_cpu_offset;
-	num_cpus = *((u32 *)(vaddr));
+	vaddr += be32_to_cpu(reg_header->num_cpu_offset);
+	num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
 	pr_debug("NumCpus : %u\n", num_cpus);
 	vaddr += sizeof(u32);
 	reg_entry = (struct fadump_reg_entry *)vaddr;
@@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
 	fdh = __va(fw_dump.fadumphdr_addr);
 
 	for (i = 0; i < num_cpus; i++) {
-		if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
+		if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
 			printk(KERN_ERR "Unable to read CPU state data\n");
 			rc = -ENOENT;
 			goto error_out;
 		}
 		/* Lower 4 bytes of reg_value contains logical cpu id */
-		cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
+		cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
 		if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
 			SKIP_TO_NEXT_CPU(reg_entry);
 			continue;
@@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
 		return -EINVAL;
 
 	/* Check if the dump data is valid. */
-	if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
+	if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
 			(fdm_active->cpu_state_data.error_flags != 0) ||
 			(fdm_active->rmr_region.error_flags != 0)) {
 		printk(KERN_ERR "Dump taken by platform is not valid\n");
@@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void)
 static inline unsigned long fadump_relocate(unsigned long paddr)
 {
 	if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
-		return fdm.rmr_region.destination_address + paddr;
+		return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
 	else
 		return paddr;
 }
@@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp)
 			 * to the specified destination_address. Hence set
 			 * the correct offset.
 			 */
-			phdr->p_offset = fdm.rmr_region.destination_address;
+			phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
 		}
 
 		phdr->p_paddr = mbase;
@@ -951,7 +951,7 @@ static void register_fadump(void)
 
 	fadump_setup_crash_memory_ranges();
 
-	addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
+	addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
 	/* Initialize fadump crash info header. */
 	addr = init_fadump_header(addr);
 	vaddr = __va(addr);
@@ -1023,7 +1023,7 @@ void fadump_cleanup(void)
 	/* Invalidate the registration only if dump is active. */
 	if (fw_dump.dump_active) {
 		init_fadump_mem_struct(&fdm,
-			fdm_active->cpu_state_data.destination_address);
+			be64_to_cpu(fdm_active->cpu_state_data.destination_address));
 		fadump_invalidate_dump(&fdm);
 	}
 }
@@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void)
 		return;
 	}
 
-	destination_address = fdm_active->cpu_state_data.destination_address;
+	destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
 	fadump_cleanup();
 	mutex_unlock(&fadump_mutex);
 
@@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private)
 	seq_printf(m,
 			"CPU : [%#016llx-%#016llx] %#llx bytes, "
 			"Dumped: %#llx\n",
-			fdm_ptr->cpu_state_data.destination_address,
-			fdm_ptr->cpu_state_data.destination_address +
-			fdm_ptr->cpu_state_data.source_len - 1,
-			fdm_ptr->cpu_state_data.source_len,
-			fdm_ptr->cpu_state_data.bytes_dumped);
+			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
+			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
+			be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
+			be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
+			be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
 	seq_printf(m,
 			"HPTE: [%#016llx-%#016llx] %#llx bytes, "
 			"Dumped: %#llx\n",
-			fdm_ptr->hpte_region.destination_address,
-			fdm_ptr->hpte_region.destination_address +
-			fdm_ptr->hpte_region.source_len - 1,
-			fdm_ptr->hpte_region.source_len,
-			fdm_ptr->hpte_region.bytes_dumped);
+			be64_to_cpu(fdm_ptr->hpte_region.destination_address),
+			be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
+			be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
+			be64_to_cpu(fdm_ptr->hpte_region.source_len),
+			be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
 	seq_printf(m,
 			"DUMP: [%#016llx-%#016llx] %#llx bytes, "
 			"Dumped: %#llx\n",
-			fdm_ptr->rmr_region.destination_address,
-			fdm_ptr->rmr_region.destination_address +
-			fdm_ptr->rmr_region.source_len - 1,
-			fdm_ptr->rmr_region.source_len,
-			fdm_ptr->rmr_region.bytes_dumped);
+			be64_to_cpu(fdm_ptr->rmr_region.destination_address),
+			be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
+			be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
+			be64_to_cpu(fdm_ptr->rmr_region.source_len),
+			be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
 
 	if (!fdm_active ||
 	    (fw_dump.reserve_dump_area_start ==
-	     fdm_ptr->cpu_state_data.destination_address))
+	     be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
 		goto out;
 
 	/* Dump is active. Show reserved memory region. */
@@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private)
1215 " : [%#016llx-%#016llx] %#llx bytes, " 1215 " : [%#016llx-%#016llx] %#llx bytes, "
1216 "Dumped: %#llx\n", 1216 "Dumped: %#llx\n",
1217 (unsigned long long)fw_dump.reserve_dump_area_start, 1217 (unsigned long long)fw_dump.reserve_dump_area_start,
1218 fdm_ptr->cpu_state_data.destination_address - 1, 1218 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
1219 fdm_ptr->cpu_state_data.destination_address - 1219 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1220 fw_dump.reserve_dump_area_start, 1220 fw_dump.reserve_dump_area_start,
1221 fdm_ptr->cpu_state_data.destination_address - 1221 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1222 fw_dump.reserve_dump_area_start); 1222 fw_dump.reserve_dump_area_start);
1223out: 1223out:
1224 if (fdm_active) 1224 if (fdm_active)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8eb857f216c1..c14383575fe8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -466,7 +466,7 @@ static inline void check_stack_overflow(void)
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 	long sp;
 
-	sp = __get_SP() & (THREAD_SIZE-1);
+	sp = current_stack_pointer() & (THREAD_SIZE-1);
 
 	/* check for stack overflow: is there less than 2KB free? */
 	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 7ce26d45777e..0d432194c018 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -114,3 +114,7 @@ _GLOBAL(longjmp)
 	mtlr	r0
 	mr	r3,r4
 	blr
+
+_GLOBAL(current_stack_pointer)
+	PPC_LL	r3,0(r1)
+	blr
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 155013da27e0..b15194e2c5fc 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(pcibus_to_node);
 #endif
-
-static void quirk_radeon_32bit_msi(struct pci_dev *dev)
-{
-	struct pci_dn *pdn = pci_get_pdn(dev);
-
-	if (pdn)
-		pdn->force_32bit_msi = true;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c4dfff6c2719..202963ee013a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -41,3 +41,5 @@ EXPORT_SYMBOL(giveup_spe);
 #ifdef CONFIG_EPAPR_PARAVIRT
 EXPORT_SYMBOL(epapr_hypercall_start);
 #endif
+
+EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index aa1df89c8b2a..923cd2daba89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1545,7 +1545,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 		tsk = current;
 	if (sp == 0) {
 		if (tsk == current)
-			asm("mr %0,1" : "=r" (sp));
+			sp = current_stack_pointer();
 		else
 			sp = tsk->thread.ksp;
 	}
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index c168337aef9d..7c55b86206b3 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -66,6 +66,11 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (!config_access_valid(pdn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
+#ifdef CONFIG_EEH
+	if (pdn->edev && pdn->edev->pe &&
+	    (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
+		return PCIBIOS_SET_FAILED;
+#endif
 
 	addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
 	buid = pdn->phb->buid;
@@ -90,9 +95,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
 	struct device_node *busdn, *dn;
 	struct pci_dn *pdn;
 	bool found = false;
-#ifdef CONFIG_EEH
-	struct eeh_dev *edev;
-#endif
 	int ret;
 
 	/* Search only direct children of the bus */
@@ -109,11 +111,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
 
 	if (!found)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-#ifdef CONFIG_EEH
-	edev = of_node_to_eeh_dev(dn);
-	if (edev && edev->pe && edev->pe->state & EEH_PE_RESET)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
 
 	ret = rtas_read_config(pdn, where, size, val);
 	if (*val == EEH_IO_ERROR_VALUE(size) &&
@@ -132,6 +129,11 @@ int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 	if (!config_access_valid(pdn, where))
 		return PCIBIOS_BAD_REGISTER_NUMBER;
+#ifdef CONFIG_EEH
+	if (pdn->edev && pdn->edev->pe &&
+	    (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
+		return PCIBIOS_SET_FAILED;
+#endif
 
 	addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
 	buid = pdn->phb->buid;
@@ -155,10 +157,6 @@ static int rtas_pci_write_config(struct pci_bus *bus,
 	struct device_node *busdn, *dn;
 	struct pci_dn *pdn;
 	bool found = false;
-#ifdef CONFIG_EEH
-	struct eeh_dev *edev;
-#endif
-	int ret;
 
 	/* Search only direct children of the bus */
 	busdn = pci_bus_to_OF_node(bus);
@@ -173,14 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus,
 
 	if (!found)
 		return PCIBIOS_DEVICE_NOT_FOUND;
-#ifdef CONFIG_EEH
-	edev = of_node_to_eeh_dev(dn);
-	if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET))
-		return PCIBIOS_DEVICE_NOT_FOUND;
-#endif
-	ret = rtas_write_config(pdn, where, size, val);
 
-	return ret;
+	return rtas_write_config(pdn, where, size, val);
 }
 
 static struct pci_ops rtas_pci_ops = {
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index cd07d79ad21c..4f3cfe1b6a33 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -522,36 +522,36 @@ void __init setup_system(void)
 	smp_release_cpus();
 #endif
 
-	printk("Starting Linux PPC64 %s\n", init_utsname()->version);
+	pr_info("Starting Linux PPC64 %s\n", init_utsname()->version);
 
-	printk("-----------------------------------------------------\n");
-	printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
-	printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
+	pr_info("-----------------------------------------------------\n");
+	pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
+	pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
 
 	if (ppc64_caches.dline_size != 0x80)
-		printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
+		pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
 	if (ppc64_caches.iline_size != 0x80)
-		printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
+		pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
 
-	printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
-	printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
-	printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
-	printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
+	pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
+	pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
+	pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
+	pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
 	       cur_cpu_spec->cpu_user_features2);
-	printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
-	printk("firmware_features = 0x%016lx\n", powerpc_firmware_features);
+	pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
+	pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
 
 #ifdef CONFIG_PPC_STD_MMU_64
 	if (htab_address)
-		printk("htab_address = 0x%p\n", htab_address);
+		pr_info("htab_address = 0x%p\n", htab_address);
 
-	printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+	pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
 #endif
 
 	if (PHYSICAL_START > 0)
-		printk("physical_start = 0x%llx\n",
+		pr_info("physical_start = 0x%llx\n",
 		       (unsigned long long)PHYSICAL_START);
-	printk("-----------------------------------------------------\n");
+	pr_info("-----------------------------------------------------\n");
 
 	DBG(" <- setup_system()\n");
 }
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 3d30ef1038e5..ea43a347a104 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -50,7 +50,7 @@ void save_stack_trace(struct stack_trace *trace)
 {
 	unsigned long sp;
 
-	asm("mr %0,1" : "=r" (sp));
+	sp = current_stack_pointer();
 
 	save_context_stack(trace, sp, current, 1);
 }
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 23eb9a9441bd..c62be60c7274 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
 	mfspr	r5,SPRN_SPRG_VDSO_READ
-	cmpdi	cr0,r3,0
-	cmpdi	cr1,r4,0
+	cmpwi	cr0,r3,0
+	cmpwi	cr1,r4,0
 	clrlwi	r6,r5,16
 	rlwinm	r7,r5,16,31-15,31-0
 	beq	cr0,1f