[linux-yocto] [PATCH 4/8] arch/powerpc: Address IBM PPC476 Parity Errors
Daniel Dragomir
daniel.dragomir at windriver.com
Fri May 12 04:41:41 PDT 2017
From: John Jacques <john.jacques at intel.com>
In some cases, it is possible to recover from a machine check. This
commit adds code that attempts to recover from the following errors:

1: TLB Parity
2: I-Cache Parity
3: D-Cache Search Parity
Signed-off-by: John Jacques <john.jacques at intel.com>
---
arch/powerpc/kernel/traps.c | 193 ++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 187 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 4c0ccb6..4212199 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -438,6 +438,180 @@ int machine_check_440A(struct pt_regs *regs)
 	}
 	return 0;
 }
+/*
+ * In some cases, it is possible to recover from a machine check...
+ */
+
+extern phys_addr_t memstart_addr;
+extern phys_addr_t lowmem_end_addr;
+
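+/*
+ * Return 0 if in_value matches the TLB index of an entry mapping a
+ * pinned kernel region (probed with tlbsx), -1 otherwise.  These
+ * entries must survive the invalidation pass in correct_tlbp_47x().
+ */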
+static int
+match_tlb_index(unsigned int in_value)
+{
+	unsigned int kernel_tlb_index;
+	unsigned long memstart;
+	unsigned long addr;
+
+	memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
+
+	for (addr = memstart; addr < lowmem_end_addr; addr += PPC_PIN_SIZE) {
+		__asm__ __volatile__(
+			"tlbsx %0, 0, %1"
+			: "=r" (kernel_tlb_index)
+			: "r" (PAGE_OFFSET + addr));
+
+		if (in_value == (kernel_tlb_index >> 16))
+			return 0;
+	}
+	return -1;
+}
+
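+/*
+ * MMUBE0/MMUBE1 hold the valid bits for the six bolted TLB entries.
+ * Return the number of the first free bolted slot, or -1 if all six
+ * are in use.
+ */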
+static int
+ppc47x_find_free_bolted_entry(void)
+{
+	unsigned int mmube0 = mfspr(SPRN_MMUBE0);
+	unsigned int mmube1 = mfspr(SPRN_MMUBE1);
+
+	if (!(mmube0 & MMUBE0_VBE0))
+		return 0;
+
+	if (!(mmube0 & MMUBE0_VBE1))
+		return 1;
+
+	if (!(mmube0 & MMUBE0_VBE2))
+		return 2;
+
+	if (!(mmube1 & MMUBE1_VBE3))
+		return 3;
+
+	if (!(mmube1 & MMUBE1_VBE4))
+		return 4;
+
+	if (!(mmube1 & MMUBE1_VBE5))
+		return 5;
+
+	return -1;
+}
+
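+/*
+ * Rewrite one bolted 256MB TLB entry mapping phys at virt.  Return 0
+ * on success or -1 if no bolted slot is free.
+ */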
+static int
+ppc47x_pin_kernel_tlb(unsigned int virt, unsigned int phys)
+{
+	unsigned int rA;
+	int bolted;
+
+	/* Base rA is HW way select, way 0, bolted bit set */
+	rA = 0x88000000;
+
+	/* Look for a bolted entry slot */
+	bolted = ppc47x_find_free_bolted_entry();
+
+	if (bolted < 0)
+		return -1;
+
+	/* Insert bolted slot number */
+	rA |= bolted << 24;
+
+	mtspr(SPRN_MMUCR, 0);
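+	/*
+	 * Program the three words of the entry: word 0 takes the
+	 * effective address, valid bit and page size, word 1 the
+	 * physical address, word 2 the access permissions.
+	 */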
+	__asm__ __volatile__(
+		"tlbwe %2,%3,0\n"
+		"tlbwe %1,%3,1\n"
+		"tlbwe %0,%3,2\n"
+		:
+		: "r" (PPC47x_TLB2_SW | PPC47x_TLB2_SR |
+		       PPC47x_TLB2_SX
+#ifdef CONFIG_SMP
+		       | PPC47x_TLB2_M
+#endif
+		      ),
+		  "r" (phys),
+		  "r" (virt | PPC47x_TLB0_VALID | PPC47x_TLB0_256M),
+		  "r" (rA));
+	return 0;
+}
+
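+/*
+ * Attempt recovery from a TLB parity error: invalidate every TLB
+ * entry except the ones pinning kernel lowmem, then re-create the
+ * bolted kernel entries.  Return 1 if the machine check was
+ * recovered, 0 otherwise.
+ */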
+static int
+correct_tlbp_47x(void)
+{
+	u32 mmucr;
+	u32 way;
+	unsigned int in_value;
+	unsigned int write_value;
+	unsigned int enc_value;
+	unsigned long addr;
+	unsigned long memstart;
+
+	memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
+	mtspr(SPRN_CCR1, 0x00000000);		/* Clear parity injection */
+	mmucr = mfspr(SPRN_MMUCR);		/* Save MMUCR */
+	mtspr(SPRN_MMUCR, mmucr & 0xffff0000);	/* Clear TID field in MMUCR */
+
+	/*
+	 * Invalidate all 256x4 TLB entries except the static kernel
+	 * TLB entries.
+	 */
+
+	in_value = 0x00000000;
+	write_value = 0x00000000;
+
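+	/*
+	 * enc_value addresses each entry directly by index and way for
+	 * tlbwe; write_value never sets the valid bit, so the write
+	 * leaves the entry invalid while giving each one a distinct
+	 * EPN.  Way-0 entries whose index matches a pinned kernel
+	 * mapping are skipped.
+	 */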
+	while (in_value < 256) {
+		for (way = 0; way < 4; way++) {
+			enc_value = (way << 29) + 0x80000000;
+			if (!((match_tlb_index(in_value) == 0)
+			      && (way == 0))) {
+				asm volatile (
+					"tlbwe %0, %1, 0"
+					:
+					: "r" (write_value), "r" (enc_value)
+				);
+			}
+			asm volatile ("isync");
+			asm volatile ("msync");
+			asm volatile ("isync");
+			write_value = write_value + 0x10100000;
+		}
+		++in_value;
+		write_value = (write_value & 0x000fffff) + 0x00001000;
+	}
+	/*
+	 * Re-pin the static kernel TLB entries.
+	 */
+
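+	/*
+	 * If no bolted slot is free, give up: returning 0 tells
+	 * machine_check_47x() that the error was not recovered.
+	 */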
+	for (addr = memstart; addr < lowmem_end_addr; addr += PPC_PIN_SIZE) {
+		if (ppc47x_pin_kernel_tlb(addr + PAGE_OFFSET, addr) != 0) {
+			mtspr(SPRN_MCSR, 0);
+			mtspr(SPRN_MCSRR0, 0);
+			mtspr(SPRN_MMUCR, mmucr);
+			return 0;
+		}
+	}
+	asm volatile ("isync");
+	asm volatile ("msync");
+	mtspr(SPRN_MCSR, 0);
+	mtspr(SPRN_MCSRR0, 0);
+	mtspr(SPRN_MMUCR, mmucr);
+
+	return 1;
+}
+
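+/*
+ * Attempt recovery from an I-cache parity error by invalidating the
+ * entire instruction cache.
+ */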
+static int
+correct_icp_47x(void)
+{
+	mtspr(SPRN_CCR1, 0x00000000);
+	flush_instruction_cache();
+	mtspr(SPRN_CCR1, 0x00000000);
+	mtspr(SPRN_MCSR, 0);
+	return 1;
+}
+
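+/*
+ * Attempt recovery from a D-cache search parity error; dccci
+ * invalidates the entire data cache.
+ */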
+static int
+correct_dcsp_47x(void)
+{
+	mtspr(SPRN_CCR1, 0x00000000);
+	asm volatile ("dccci 0, 0" : : : "memory");
+	asm volatile ("isync");
+	mtspr(SPRN_CCR1, 0x00000000);
+	mtspr(SPRN_MCSR, 0);
+	return 1;
+}
 
 int machine_check_47x(struct pt_regs *regs)
 {
@@ -458,14 +632,21 @@ int machine_check_47x(struct pt_regs *regs)
printk(KERN_ERR "Data Read PLB Error\n");
if (mcsr & MCSR_DWB)
printk(KERN_ERR "Data Write PLB Error\n");
- if (mcsr & MCSR_TLBP)
- printk(KERN_ERR "TLB Parity Error\n");
+ if (mcsr & MCSR_TLBP) {
+ printk(KERN_ERR
+ "TLB Parity Error (Attempting Correction)\n");
+ return correct_tlbp_47x();
+ }
if (mcsr & MCSR_ICP) {
- flush_instruction_cache();
- printk(KERN_ERR "I-Cache Parity Error\n");
+ printk(KERN_ERR
+ "I-Cache Parity Error (Attempting Correction)\n");
+ return correct_icp_47x();
+ }
+ if (mcsr & MCSR_DCSP) {
+ printk(KERN_ERR
+ "D-Cache Search Parity Error (Attempting Correction)\n");
+ return correct_dcsp_47x();
}
- if (mcsr & MCSR_DCSP)
- printk(KERN_ERR "D-Cache Search Parity Error\n");
if (mcsr & PPC47x_MCSR_GPR)
printk(KERN_ERR "GPR Parity Error\n");
if (mcsr & PPC47x_MCSR_FPR)
--
2.7.4