[linux-yocto] [PATCH v2 04/39] arch/arm/mach-axxia: add power management support
Cristian Bercaru
cristian.bercaru at windriver.com
Thu May 21 02:40:29 PDT 2015
From: Charlie Paul <cpaul.windriver at gmail.com>
These files add power management capabilities to the LSI Axxia 5500
platform.
Signed-off-by: Charlie Paul <cpaul.windriver at gmail.com>
---
arch/arm/mach-axxia/hotplug.c | 282 ++++++
arch/arm/mach-axxia/lsi_power_management.c | 1400 ++++++++++++++++++++++++++++
arch/arm/mach-axxia/lsi_power_management.h | 192 ++++
arch/arm/mach-axxia/perf_event_memc.c | 152 +++
arch/arm/mach-axxia/perf_event_memc.h | 67 ++
arch/arm/mach-axxia/perf_event_pcx.c | 52 ++
arch/arm/mach-axxia/perf_event_platform.c | 275 ++++++
arch/arm/mach-axxia/perf_event_platform.h | 10 +
arch/arm/mach-axxia/perf_event_vp.c | 53 ++
arch/arm/mach-axxia/rapidio.c | 112 +++
10 files changed, 2595 insertions(+)
create mode 100644 arch/arm/mach-axxia/hotplug.c
create mode 100644 arch/arm/mach-axxia/lsi_power_management.c
create mode 100644 arch/arm/mach-axxia/lsi_power_management.h
create mode 100644 arch/arm/mach-axxia/perf_event_memc.c
create mode 100644 arch/arm/mach-axxia/perf_event_memc.h
create mode 100644 arch/arm/mach-axxia/perf_event_pcx.c
create mode 100644 arch/arm/mach-axxia/perf_event_platform.c
create mode 100644 arch/arm/mach-axxia/perf_event_platform.h
create mode 100644 arch/arm/mach-axxia/perf_event_vp.c
create mode 100644 arch/arm/mach-axxia/rapidio.c
diff --git a/arch/arm/mach-axxia/hotplug.c b/arch/arm/mach-axxia/hotplug.c
new file mode 100644
index 0000000..998c56c
--- /dev/null
+++ b/arch/arm/mach-axxia/hotplug.c
@@ -0,0 +1,282 @@
+/*
+ * linux/arch/arm/mach-axxia/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+
+#include <mach/axxia-gic.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/cp15.h>
+#include "lsi_power_management.h"
+#include "axxia_circular_queue.h"
+extern struct circular_queue_t axxia_circ_q;
+
+extern volatile int pen_release;
+
+/*
+ * pm_cpu_logical_shutdown - logically remove one core from SMP operation.
+ *
+ * Disables the D-cache, flushes L1, drops the core out of the coherency
+ * domain (ACTLR.SMP), and parks it in WFI pending physical power-down.
+ */
+static inline void pm_cpu_logical_shutdown(u32 cpu)
+{
+ u32 val;
+
+ /*
+  * Read L2CTLR; the value is discarded and the 0x1 input operand is
+  * unused by the template.  NOTE(review): confirm this read is
+  * actually required (it has no visible effect here).
+  */
+ asm volatile(
+ " mrc p15, 1, %0, c9, c0, 2\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
+
+ /* Disable the D-cache: clear SCTLR.C */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (val)
+ : "Ir" (CR_C)
+ : "cc");
+
+ /* Clear and invalidate all data from L1 data cache */
+ flush_cache_all();
+
+ /* Switch the processor over to AMP mode out of SMP: clear ACTLR.SMP (bit 6) */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (val)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+
+ /* Wait here until the core is physically powered off */
+ wfi();
+
+}
+
+/*
+ * pm_L2_logical_shutdown - shut down the last core of a cluster together
+ * with its L2 preparation.
+ *
+ * Disables the D-cache and L2 prefetch, flushes caches, sets the debug
+ * OS double-lock, leaves SMP mode, then parks in WFI.
+ */
+static inline void pm_L2_logical_shutdown(u32 cpu)
+{
+ u32 val;
+
+ /* Disable the D-cache: clear SCTLR.C */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (val)
+ : "Ir" (CR_C)
+ : "cc");
+
+
+ asm volatile(
+ /*
+ * Disable L2 prefetch (L2PFR, set bit 10)
+ */
+ " mrc p15, 1, %0, c15, c0, 3\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 3\n"
+ : "=&r" (val)
+ : "Ir" (0x400)
+ : "cc");
+
+ /* Set bit 0 of ACTLR2 */
+ asm volatile(
+ " mrc p15, 1, %0, c15, c0, 4\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 4\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
+
+ isb();
+ dsb();
+
+ flush_cache_all();
+
+ /* Turn the DBG Double Lock quiet */
+ asm volatile(
+ /*
+ * Set the DBGOSDLR.DLK bit (the code ORs bit 0 in, i.e. sets it)
+ */
+ " mrc p14, 0, %0, c1, c3, 4\n"
+ " orr %0, %0, %1\n"
+ " mcr p14, 0, %0, c1, c3, 4\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
+
+ /* Switch the processor over to AMP mode out of SMP: clear ACTLR.SMP (bit 6) */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (val)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+
+ /* Wait here until the cluster is physically powered off */
+ wfi();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU_LOW_POWER
+/*
+ * cpu_enter_lowpower_a15 - prepare an A15 core for a low-power WFI loop:
+ * disable the D-cache, flush, and leave the SMP coherency domain.
+ */
+static inline void cpu_enter_lowpower_a15(void)
+{
+ unsigned int v;
+
+ /* Disable the D-cache: clear SCTLR.C */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+
+ flush_cache_all();
+
+ asm volatile(
+ /*
+ * Turn off coherency: clear ACTLR.SMP (bit 6)
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+}
+
+/*
+ * cpu_leave_lowpower - undo cpu_enter_lowpower_a15(): re-enable the
+ * D-cache (SCTLR.C) and rejoin the SMP coherency domain (ACTLR.SMP).
+ */
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+ isb();
+ dsb();
+}
+
+/*
+ * platform_do_lowpower - spin in WFI until this CPU is properly woken.
+ *
+ * A wakeup is "proper" only when pen_release matches this CPU's
+ * physical (MPIDR-style) id; any other wakeup is counted in *spurious
+ * and the CPU re-enters WFI.
+ */
+static void __ref platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ int phys_cpu, cluster;
+
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ wfi();
+
+ /*
+ * Convert the "cpu" variable to be compatible with the
+ * ARM MPIDR register format (CLUSTERID and CPUID):
+ *
+ * Bits: |11 10 9 8|7 6 5 4 3 2|1 0
+ * | CLUSTER | Reserved |CPU
+ */
+ phys_cpu = cpu_logical_map(cpu);
+ cluster = (phys_cpu / 4) << 8;
+ phys_cpu = cluster + (phys_cpu % 4);
+
+ if (pen_release == phys_cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+#endif
+
+/*
+ * axxia_platform_cpu_kill - called from the CPU-hotplug core to finish
+ * taking @cpu down.  With full power-down configured it powers the CPU
+ * off via pm_cpu_shutdown(); always returns 1 ("kill succeeded").
+ */
+int axxia_platform_cpu_kill(unsigned int cpu)
+{
+
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ /* get_cpu/put_cpu: disable preemption around the shutdown sequence */
+ get_cpu();
+ pm_cpu_shutdown(cpu);
+ put_cpu();
+#endif
+ return 1;
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+
+/*
+ * axxia_platform_cpu_die - run on the dying CPU itself.
+ *
+ * Full power-down build: logically shuts down the core (and the L2 if
+ * this is the cluster's last live CPU) and never returns (WFI loop).
+ * Low-power build: enters WFI low-power and returns after a proper
+ * wakeup, reporting any spurious wakeups.
+ */
+void axxia_platform_cpu_die(unsigned int cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ bool last_cpu;
+
+ last_cpu = pm_cpu_last_of_cluster(cpu);
+ if (last_cpu)
+ pm_L2_logical_shutdown(cpu);
+ else
+ pm_cpu_logical_shutdown(cpu);
+
+ /* Does not return: wait for the power to be pulled */
+ for (;;)
+ wfi();
+
+#else /* CPU low power mode */
+
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower_a15();
+ pm_in_progress[cpu] = true;
+
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+#endif
+
+}
+
+/*
+ * platform_cpu_disable - veto hotplug-removal of CPU 0.
+ * Returns 0 if @cpu may be taken offline, -EPERM for CPU 0.
+ */
+int platform_cpu_disable(unsigned int cpu)
+{
+
+ /*
+ * we don't allow CPU 0 to be shutdown (it is still too special
+ * e.g. clock tick interrupts)
+ */
+
+ return cpu == 0 ? -EPERM : 0;
+}
diff --git a/arch/arm/mach-axxia/lsi_power_management.c b/arch/arm/mach-axxia/lsi_power_management.c
new file mode 100644
index 0000000..f3ad4f8
--- /dev/null
+++ b/arch/arm/mach-axxia/lsi_power_management.c
@@ -0,0 +1,1400 @@
+/*
+ * linux/arch/arm/mach-axxia/lsi_power_management.c
+ *
+ * Created on: Jun 19, 2014
+ *     Author: z8cpaul
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/delay.h>
+#include <linux/of_address.h>
+#include <asm/exception.h>
+#include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
+#include <asm/cp15.h>
+
+#include "axxia.h"
+#include <mach/axxia-gic.h>
+#include "lsi_power_management.h"
+
+#undef DEBUG_CPU_PM
+
+#define SYSCON_PHYS_ADDR 0x002010030000ULL
+#define DICKENS_PHYS_ADDR 0x2000000000
+
+#define PM_WAIT_TIME (10000)
+#define MAX_CLUSTER (4)
+#define IPI_IRQ_MASK (0xFFFF)
+
+#define CHECK_BIT(var, pos) ((var) & (1 << (pos)))
+
+bool pm_in_progress[16];
+bool cluster_power_up[4];
+
+static const u32 cluster_to_node[MAX_CLUSTER] = { DKN_CLUSTER0_NODE,
+DKN_CLUSTER1_NODE,
+DKN_CLUSTER2_NODE,
+DKN_CLUSTER3_NODE };
+
+static const u32 cluster_to_poreset[MAX_CLUSTER] = {
+PORESET_CLUSTER0,
+PORESET_CLUSTER1,
+PORESET_CLUSTER2,
+PORESET_CLUSTER3 };
+
+static const u32 cluster_to_mask[MAX_CLUSTER] = {
+ IPI0_MASK,
+ IPI1_MASK,
+ IPI2_MASK,
+ IPI3_MASK
+};
+
+static const u32 ipi_register[MAX_IPI] = {
+ NCP_SYSCON_MASK_IPI0,
+ NCP_SYSCON_MASK_IPI1,
+ NCP_SYSCON_MASK_IPI2,
+ NCP_SYSCON_MASK_IPI3,
+ NCP_SYSCON_MASK_IPI4,
+ NCP_SYSCON_MASK_IPI5,
+ NCP_SYSCON_MASK_IPI6,
+ NCP_SYSCON_MASK_IPI7,
+ NCP_SYSCON_MASK_IPI8,
+ NCP_SYSCON_MASK_IPI9,
+ NCP_SYSCON_MASK_IPI10,
+ NCP_SYSCON_MASK_IPI11,
+ NCP_SYSCON_MASK_IPI12,
+ NCP_SYSCON_MASK_IPI13,
+ NCP_SYSCON_MASK_IPI14,
+ NCP_SYSCON_MASK_IPI15,
+ NCP_SYSCON_MASK_IPI16,
+ NCP_SYSCON_MASK_IPI17,
+ NCP_SYSCON_MASK_IPI18
+};
+
+enum pm_error_code {
+ PM_ERR_DICKENS_IOREMAP = 200,
+ PM_ERR_DICKENS_SNOOP_DOMAIN,
+ PM_ERR_FAILED_PWR_DWN_RAM,
+ PM_ERR_FAILED_STAGE_1,
+ PM_ERR_ACK1_FAIL,
+ PM_ERR_RAM_ACK_FAIL,
+ PM_ERR_FAIL_L2ACK,
+ PM_ERR_FAIL_L2HSRAM
+};
+static void __iomem *syscon;
+
+u32 pm_cpu_powered_down;
+
+
+/*======================= LOCAL FUNCTIONS ==============================*/
+static void pm_set_bits_syscon_register(u32 reg, u32 data);
+static void pm_or_bits_syscon_register(u32 reg, u32 data);
+static void pm_clear_bits_syscon_register(u32 reg, u32 data);
+static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit);
+static bool pm_wait_for_bit_clear_with_timeout(u32 reg,
+ u32 bit);
+static void pm_dickens_logical_shutdown(u32 cluster);
+static int pm_dickens_logical_powerup(u32 cluster);
+static int pm_cpu_physical_isolation_and_power_down(int cpu);
+static void pm_L2_isolation_and_power_down(int cluster);
+static int pm_cpu_physical_connection_and_power_up(int cpu);
+static int pm_L2_physical_connection_and_power_up(u32 cluster);
+static int pm_L2_logical_powerup(u32 cluster, u32 cpu);
+
+/*
+ * pm_first_cpu_of_cluster - is @cpu the first CPU of its cluster to be
+ * powered back up?
+ *
+ * Returns true only when all four CPUs of the cluster are currently
+ * marked powered-down in pm_cpu_powered_down (count == 4), i.e. the
+ * whole cluster (and its L2) is off and must be re-powered first.
+ * Cluster 0 always returns false since CPU 0 is never powered off.
+ */
+static bool pm_first_cpu_of_cluster(u32 cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
+ u32 count = 0;
+
+ switch (cpu) {
+ case (0):
+ case (1):
+ case (2):
+ case (3):
+ /* This will never happen because cpu 0 will never be turned off */
+ break;
+ case (4):
+ case (5):
+ case (6):
+ case (7):
+ if (pm_cpu_powered_down & (1 << 4))
+ count++;
+ if (pm_cpu_powered_down & (1 << 5))
+ count++;
+ if (pm_cpu_powered_down & (1 << 6))
+ count++;
+ if (pm_cpu_powered_down & (1 << 7))
+ count++;
+ if (count == 4)
+ return true;
+ break;
+ case (8):
+ case (9):
+ case (10):
+ case (11):
+ if (pm_cpu_powered_down & (1 << 8))
+ count++;
+ if (pm_cpu_powered_down & (1 << 9))
+ count++;
+ if (pm_cpu_powered_down & (1 << 10))
+ count++;
+ if (pm_cpu_powered_down & (1 << 11))
+ count++;
+ if (count == 4)
+ return true;
+ break;
+ case (12):
+ case (13):
+ case (14):
+ case (15):
+ if (pm_cpu_powered_down & (1 << 12))
+ count++;
+ if (pm_cpu_powered_down & (1 << 13))
+ count++;
+ if (pm_cpu_powered_down & (1 << 14))
+ count++;
+ if (pm_cpu_powered_down & (1 << 15))
+ count++;
+ if (count == 4)
+ return true;
+ break;
+ default:
+ pr_err("ERROR: the cpu does not exist: %d - %s:%d\n", cpu, __FILE__,
+ __LINE__);
+ break;
+ }
+#endif
+ return false;
+}
+
+/*
+ * pm_cpu_last_of_cluster - is @cpu the last running CPU of its cluster?
+ *
+ * Returns true when the other three CPUs of the cluster are already
+ * marked powered-down (count == 3), so taking this one down should
+ * also shut down the cluster's L2.  Cluster 0 always returns false
+ * since CPU 0 is never powered off.
+ */
+bool pm_cpu_last_of_cluster(u32 cpu)
+{
+#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
+
+ u32 count = 0;
+
+ switch (cpu) {
+ case (0):
+ case (1):
+ case (2):
+ case (3):
+ /* This will never happen because cpu 0 will never be turned off */
+ break;
+ case (4):
+ case (5):
+ case (6):
+ case (7):
+ if (pm_cpu_powered_down & (1 << 4))
+ count++;
+ if (pm_cpu_powered_down & (1 << 5))
+ count++;
+ if (pm_cpu_powered_down & (1 << 6))
+ count++;
+ if (pm_cpu_powered_down & (1 << 7))
+ count++;
+ if (count == 3)
+ return true;
+ break;
+ case (8):
+ case (9):
+ case (10):
+ case (11):
+ if (pm_cpu_powered_down & (1 << 8))
+ count++;
+ if (pm_cpu_powered_down & (1 << 9))
+ count++;
+ if (pm_cpu_powered_down & (1 << 10))
+ count++;
+ if (pm_cpu_powered_down & (1 << 11))
+ count++;
+ if (count == 3)
+ return true;
+ break;
+ case (12):
+ case (13):
+ case (14):
+ case (15):
+ if (pm_cpu_powered_down & (1 << 12))
+ count++;
+ if (pm_cpu_powered_down & (1 << 13))
+ count++;
+ if (pm_cpu_powered_down & (1 << 14))
+ count++;
+ if (pm_cpu_powered_down & (1 << 15))
+ count++;
+ if (count == 3)
+ return true;
+ break;
+ default:
+ pr_err("ERROR: the cpu does not exist: %d - %s:%d\n", cpu, __FILE__,
+ __LINE__);
+ break;
+ }
+#endif
+ return false;
+}
+
+/*
+ * pm_set_bits_syscon_register - write @data to syscon register @reg.
+ * NOTE(review): despite the name this is a plain overwrite, not a
+ * read-modify-write; bits not in @data are cleared.  See
+ * pm_or_bits_syscon_register() for the OR-in variant.
+ */
+static void pm_set_bits_syscon_register(u32 reg, u32 data)
+{
+ writel(data, syscon + reg);
+}
+
+/* Read-modify-write: OR @data into syscon register @reg. */
+static void pm_or_bits_syscon_register(u32 reg, u32 data)
+{
+ u32 tmp;
+
+ tmp = readl(syscon + reg);
+ tmp |= data;
+ writel(tmp, syscon + reg);
+}
+
+
+/* Read-modify-write: clear the @data bits in syscon register @reg. */
+static void pm_clear_bits_syscon_register(u32 reg, u32 data)
+{
+ u32 tmp;
+
+ tmp = readl(syscon + reg);
+ tmp &= ~(data);
+ writel(tmp, syscon + reg);
+}
+
+/*
+ * pm_test_for_bit_with_timeout - poll syscon register @reg until bit
+ * @bit is set, or PM_WAIT_TIME iterations elapse.
+ *
+ * Returns true if the bit was seen set, false on timeout.  Note this
+ * is a tight busy-wait with no delay between reads, so the "timeout"
+ * is an iteration count, not a fixed wall-clock time.
+ */
+static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit)
+{
+
+ u32 tmp = 0;
+ u32 cnt = 0;
+
+ while (cnt < PM_WAIT_TIME) {
+ tmp = readl(syscon + reg);
+ if (CHECK_BIT(tmp, bit))
+ break;
+ cnt++;
+ }
+ if (cnt == PM_WAIT_TIME) {
+ pr_err("reg=0x%x tmp:=0x%x\n", reg, tmp);
+ return false;
+ }
+ return true;
+}
+
+/*
+ * pm_wait_for_bit_clear_with_timeout - poll syscon register @reg until
+ * bit @bit is clear, or PM_WAIT_TIME iterations elapse.
+ * Returns true if the bit cleared, false on timeout (busy-wait, as in
+ * pm_test_for_bit_with_timeout()).
+ */
+static bool pm_wait_for_bit_clear_with_timeout(u32 reg, u32 bit)
+{
+ u32 cnt = 0;
+ u32 tmp = 0;
+
+ while (cnt < PM_WAIT_TIME) {
+ tmp = readl(syscon + reg);
+ if (!(CHECK_BIT(tmp, bit)))
+ break;
+ cnt++;
+ }
+ if (cnt == PM_WAIT_TIME) {
+ pr_err("reg=0x%x tmp:=0x%x\n", reg, tmp);
+ return false;
+ }
+
+ return true;
+}
+/*
+ * pm_dickens_logical_shutdown - remove @cluster from the Dickens (CCN)
+ * coherency fabric: clear its bit in every HN-F snoop domain and in the
+ * DVM domain, polling each for completion.
+ *
+ * Errors are logged but not returned; the dickens mapping is always
+ * unmapped on exit.
+ */
+static void pm_dickens_logical_shutdown(u32 cluster)
+{
+ int i;
+ int status;
+ u32 bit;
+ u32 bit_pos;
+ int retries;
+ void __iomem *dickens;
+
+ dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
+ if (dickens == NULL) {
+ pr_err("DICKENS: Failed to map the dickens registers\n");
+ return;
+ }
+
+ bit = (0x01 << cluster_to_node[cluster]);
+ bit_pos = cluster_to_node[cluster];
+
+ /* Clear this cluster's bit in each HN-F node's snoop domain */
+ for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) {
+ writel(bit,
+ dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
+ + DKN_HNF_SNOOP_DOMAIN_CTL_CLR);
+
+ retries = PM_WAIT_TIME;
+
+ do {
+ status = readl(
+ dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
+ + DKN_HNF_SNOOP_DOMAIN_CTL);
+ udelay(1);
+ } while ((0 < --retries) && CHECK_BIT(status, bit_pos));
+
+ if (0 == retries) {
+ pr_err("DICKENS: Failed to clear the SNOOP main control. LOOP:%d reg: 0x%x\n", i, status);
+ goto dickens_power_down;
+
+ }
+
+ }
+ /* Clear the domain cluster */
+ writel(bit, dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + DKN_MN_DVM_DOMAIN_CTL_CLR);
+
+ /* Check for complete */
+ retries = PM_WAIT_TIME;
+
+ do {
+ status = readl(
+ dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET)
+ + DKN_MN_DVM_DOMAIN_CTL);
+ udelay(1);
+ } while ((0 < --retries) && CHECK_BIT(status, bit_pos));
+
+ if (0 == retries) {
+ pr_err("DICKENS: failed to set DOMAIN OFFSET Reg=0x%x\n", status);
+ goto dickens_power_down;
+
+ }
+
+/* The gotos above fall through here anyway; label kept for clarity */
+dickens_power_down:
+ iounmap(dickens);
+}
+
+/*
+ * pm_dickens_logical_powerup - re-add @cluster to the Dickens (CCN)
+ * coherency fabric: set its bit in every HN-F snoop domain and in the
+ * DVM domain, polling each for completion.
+ *
+ * Returns 0 on success or a negative PM_ERR_* code; the dickens
+ * mapping is always unmapped on exit.
+ */
+static int pm_dickens_logical_powerup(u32 cluster)
+{
+ int i;
+ u32 status;
+ u32 bit;
+ u32 bit_pos;
+ int retries;
+ int rval = 0;
+
+ void __iomem *dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
+
+ if (dickens == NULL) {
+ pr_err("Failed to map dickens registers\n");
+ return -PM_ERR_DICKENS_IOREMAP;
+ }
+
+ bit = (0x01 << cluster_to_node[cluster]);
+ bit_pos = cluster_to_node[cluster];
+
+ /* Set this cluster's bit in each HN-F node's snoop domain */
+ for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) {
+ writel(bit,
+ dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
+ + DKN_HNF_SNOOP_DOMAIN_CTL_SET);
+
+ retries = PM_WAIT_TIME;
+
+ do {
+ status = readl(
+ dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
+ + DKN_HNF_SNOOP_DOMAIN_CTL);
+ udelay(1);
+ } while ((0 < --retries) && !CHECK_BIT(status, bit_pos));
+
+ if (0 == retries) {
+ pr_err("DICKENS: Failed on the SNOOP DONAIN\n");
+ rval = -PM_ERR_DICKENS_SNOOP_DOMAIN;
+ goto dickens_power_up;
+ }
+
+ }
+
+ /* Set the cluster's bit in the DVM domain */
+ writel(bit, dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET) + DKN_MN_DVM_DOMAIN_CTL_SET);
+
+ /* Check for complete */
+ retries = PM_WAIT_TIME;
+
+ do {
+ status = readl(
+ dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET)
+ + DKN_MN_DVM_DOMAIN_CTL);
+ udelay(1);
+ } while ((0 < --retries) && !CHECK_BIT(status, bit_pos));
+
+ if (0 == retries) {
+ pr_err("DICKENS: Failed on the SNOOP DONAIN CTL SET\n");
+ rval = -PM_ERR_DICKENS_SNOOP_DOMAIN;
+ goto dickens_power_up;
+ }
+
+dickens_power_up:
+ iounmap(dickens);
+
+ return rval;
+}
+
+/* Mask all IPI sources for @cpu by clearing its IPI mask register. */
+static void pm_disable_ipi_interrupts(u32 cpu)
+{
+ pm_clear_bits_syscon_register(ipi_register[cpu], IPI_IRQ_MASK);
+}
+
+/*
+ * pm_enable_ipi_interrupts - wire IPIs both ways for a freshly powered
+ * CPU: let @cpu receive IPIs from every currently-powered CPU, and let
+ * each of those CPUs receive IPIs from @cpu.
+ */
+static void pm_enable_ipi_interrupts(u32 cpu)
+{
+
+ u32 i;
+ /* Bitmask of CPUs that are currently powered on */
+ u32 powered_on_cpu = (~(pm_cpu_powered_down) & IPI_IRQ_MASK);
+
+ pm_set_bits_syscon_register(ipi_register[cpu], powered_on_cpu);
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ if ((1 << i) & powered_on_cpu)
+ pm_or_bits_syscon_register(ipi_register[i], (1 << cpu));
+ }
+}
+
+/*
+ * Map the syscon register block.  NOTE(review): the ioremap() result
+ * is not checked; callers dereference `syscon` unconditionally.
+ */
+void pm_init_syscon(void)
+{
+ syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
+}
+
+/*
+ * pm_cpu_active - true if @cpu's bit is set in the syscon QACTIVE
+ * register (the CPU is reporting activity to the power controller).
+ */
+bool pm_cpu_active(u32 cpu)
+{
+
+ bool success = false;
+ u32 reg;
+
+ reg = readl(syscon + NCP_SYSCON_PWR_QACTIVE);
+ if (reg & (1 << cpu))
+ success = true;
+
+ return success;
+
+}
+
+/*
+ * pm_cpu_shutdown - fully power down @cpu.
+ *
+ * If @cpu is the last live CPU of its cluster, the cluster is also
+ * removed from the Dickens coherency domain and its L2/ACE are powered
+ * off; otherwise only the CPU itself is isolated and powered down.
+ * Successfully-shutdown CPUs are recorded in pm_cpu_powered_down.
+ */
+void pm_cpu_shutdown(u32 cpu)
+{
+
+ bool success;
+ u32 reqcpu = cpu_logical_map(cpu);
+ u32 cluster = reqcpu / CORES_PER_CLUSTER;
+ u32 cluster_mask = (0x01 << cluster);
+ bool last_cpu;
+ int rval = 0;
+
+ /* Check to see if the cpu is powered up */
+ if (pm_cpu_powered_down & (1 << reqcpu)) {
+ pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__);
+ return;
+ }
+
+ pm_init_syscon();
+
+ /*
+ * Is this the last cpu of a cluster then turn off the L2 cache
+ * along with the CPU.
+ */
+ last_cpu = pm_cpu_last_of_cluster(reqcpu);
+ if (last_cpu) {
+
+ /* Disable all the interrupts to the cluster gic */
+ pm_or_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
+
+ /* Remove the cluster from the Dickens coherency domain */
+ pm_dickens_logical_shutdown(cluster);
+
+ /* Power down the cpu */
+ pm_cpu_physical_isolation_and_power_down(reqcpu);
+
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_CACTIVE_CNT, cluster);
+ if (!success) {
+ pr_err(
+ "Failed to keep other cluster count going on cluster %d: %s-%d\n",
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
+ }
+
+ /* Turn off the ACE */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask);
+
+ /* Wait for ACE to complete power off */
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NACEPWRDNACK, cluster);
+ if (!success) {
+ pr_err("Failed to power off ACE on cluster %d: %s-%d\n",
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
+ }
+
+ /* Isolate the cluster */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask);
+
+ /* Wait for WFI L2 to go to standby */
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_STANDBYWFIL2, cluster);
+ if (!success) {
+ pr_err("Failed to enter L2 WFI on cluster %d: %s-%d\n",
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
+ }
+
+ /* Power off the L2.  NOTE(review): this helper returns void, so
+  * rval below is still its initial 0 and the else branch is dead. */
+ pm_L2_isolation_and_power_down(cluster);
+ if (rval == 0) {
+ pr_info("CPU %d is powered down with cluster: %d\n", reqcpu, cluster);
+ pm_cpu_powered_down |= (1 << reqcpu);
+ } else
+ pr_err("CPU %d failed to power down\n", reqcpu);
+
+
+ } else {
+
+ rval = pm_cpu_physical_isolation_and_power_down(reqcpu);
+ if (rval == 0)
+ pm_cpu_powered_down |= (1 << reqcpu);
+ else
+ pr_err("CPU %d failed to power down\n", reqcpu);
+ }
+
+pm_shutdown_exit:
+ iounmap(syscon);
+}
+
+/*
+ * pm_cpu_powerup - power @cpu back up.
+ *
+ * If this is the first CPU of a fully powered-down cluster, the L2 is
+ * logically powered up first; otherwise the CPU is simply held in
+ * reset, physically powered, then released.  On success the CPU's bit
+ * is cleared from pm_cpu_powered_down and its IPIs are re-enabled.
+ *
+ * Returns 0 on success or a negative PM_ERR_* code.
+ */
+int pm_cpu_powerup(u32 cpu)
+{
+
+ bool first_cpu;
+ int rval = 0;
+ u32 cpu_mask = (0x01 << cpu);
+
+ u32 reqcpu = cpu_logical_map(cpu);
+ u32 cluster = reqcpu / CORES_PER_CLUSTER;
+ u32 cluster_mask = (0x01 << cluster);
+
+ pm_init_syscon();
+
+ /*
+ * Is this the first cpu of a cluster to come back on?
+ * Then power up the L2 cache.
+ */
+ first_cpu = pm_first_cpu_of_cluster(cpu);
+ if (first_cpu) {
+
+ rval = pm_L2_logical_powerup(cluster, cpu);
+ if (rval) {
+ pr_err("CPU: Failed the logical L2 power up\n");
+ goto pm_power_up;
+ }
+ cluster_power_up[cluster] = true;
+ /* Re-enable the cluster's GIC */
+ pm_clear_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
+
+
+ } else {
+ /* Set the CPU into reset */
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
+
+ }
+
+
+ /*
+ * Power up the CPU
+ */
+ rval = pm_cpu_physical_connection_and_power_up(cpu);
+ if (rval) {
+ pr_err("Failed to power up physical connection of cpu: %d\n", cpu);
+ goto pm_power_up;
+ }
+
+ /*
+ * The key value must be written before the CPU RST can be written.
+ */
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
+
+ /*
+ * Clear the powered down mask
+ */
+ pm_cpu_powered_down &= ~(1 << cpu);
+
+ /* Enable the CPU IPI */
+ pm_enable_ipi_interrupts(cpu);
+
+
+
+pm_power_up:
+ iounmap(syscon);
+ return rval;
+}
+
+/* Return the bitmask of CPUs currently recorded as powered down. */
+unsigned long pm_get_powered_down_cpu(void)
+{
+ return pm_cpu_powered_down;
+}
+
+
+/*
+ * pm_cpu_logical_powerup - per-core logical bring-up: re-enable the
+ * D-cache and I-cache (SCTLR.C/I) and clear bit 0 of ACTLR2.
+ */
+inline void pm_cpu_logical_powerup(void)
+{
+ unsigned int v;
+
+ /* Re-enable caches: set SCTLR.C then SCTLR.I */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (CR_I)
+ : "cc");
+
+ /*
+ * Initialize the ACTLR2 register (all cores).
+ */
+
+ asm volatile(
+ " mrc p15, 1, %0, c15, c0, 4\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 4\n"
+ : "=&r" (v)
+ : "Ir" (0x1)
+ : "cc");
+
+ isb();
+ dsb();
+}
+
+/*
+ * pm_cluster_logical_powerup - per-cluster logical bring-up run by the
+ * primary core: program L2CTLR and L2ACTLR.
+ */
+inline void pm_cluster_logical_powerup(void)
+{
+ unsigned int v;
+
+ /*
+ * Initialize the L2CTLR register (primary core in each cluster):
+ * set bit 0 and bit 21.
+ */
+ asm volatile(
+ " mrc p15, 1, %0, c9, c0, 2\n"
+ " orr %0, %0, %1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 1, %0, c9, c0, 2"
+ : "=&r" (v)
+ : "Ir" (0x01), "Ir" (0x1 << 21)
+ : "cc");
+ isb();
+ dsb();
+
+ /*
+ * Initialize the L2ACTLR register (primary core in each cluster).
+ * NOTE(review): the mrc below reads into r0, but the subsequent
+ * orr/mcr use %0 (a compiler-chosen register holding uninitialized
+ * v) -- the read value is likely discarded and garbage written.
+ * The mrc should probably target %0.  Confirm and fix.
+ */
+ asm volatile(
+ " mrc p15, 1, r0, c15, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " orr %0, %0, %2\n"
+ " orr %0, %0, %3\n"
+ " orr %0, %0, %4\n"
+ " orr %0, %0, %5\n"
+ " mcr p15, 1, %0, c15, c0, 0"
+ : "=&r" (v)
+ : "Ir" (0x1 << 3), "Ir" (0x1 << 7), "Ir" (0x1 << 12), "Ir" (0x1 << 13), "Ir" (0x1 << 14)
+ : "cc");
+ isb();
+ dsb();
+
+}
+
+/*
+ * pm_cpu_physical_isolation_and_power_down - power off one CPU:
+ * disable its IPIs, power down its HS RAMs, engage isolation clamps,
+ * then drop stage-2 and stage-1 logic power.
+ *
+ * Returns 0 on success or a negative PM_ERR_* code.
+ */
+static int pm_cpu_physical_isolation_and_power_down(int cpu)
+{
+
+ int rval = 0;
+
+ bool success;
+ u32 mask = (0x01 << cpu);
+
+ /* Disable the CPU IPI */
+ pm_disable_ipi_interrupts(cpu);
+
+ /* Initiate power down of the CPU's HS Rams */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+
+ /* Wait until the RAM power down is complete */
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
+ if (!success) {
+ rval = -PM_ERR_FAILED_PWR_DWN_RAM;
+ pr_err("CPU: Failed to power down CPU RAM\n");
+ goto power_down_cleanup;
+ }
+
+ /* Activate the CPU's isolation clamps */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
+
+ /* Initiate power down of the CPU logic */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+
+ udelay(16);
+
+ /* Continue power down of the CPU logic */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+ if (!success) {
+ rval = -PM_ERR_FAILED_STAGE_1;
+ pr_err("CPU: Failed to power down stage 1 cpu\n");
+ goto power_down_cleanup;
+ }
+
+power_down_cleanup:
+
+ return rval;
+}
+
+/*
+ * pm_cpu_physical_connection_and_power_up - reverse of the power-down
+ * sequence: stage-1 then stage-2 logic power, HS RAMs, then release the
+ * isolation clamps.
+ *
+ * Returns 0 on success or a negative PM_ERR_* code.
+ */
+static int pm_cpu_physical_connection_and_power_up(int cpu)
+{
+ int rval = 0;
+
+ bool success;
+ u32 mask = (0x01 << cpu);
+
+ /* Initiate power up of the CPU */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+
+ /* Wait until CPU logic power is complete */
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+ if (!success) {
+ rval = -PM_ERR_ACK1_FAIL;
+ pr_err("CPU: Failed to get ACK from power down stage 1\n");
+ goto power_up_cleanup;
+ }
+
+ /* Continue stage 2 power up of the CPU*/
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+
+ udelay(16);
+
+ /* Initiate power up of HS Rams */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+
+ /* Wait until the RAM power up is complete */
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
+ if (!success) {
+ rval = -PM_ERR_RAM_ACK_FAIL;
+ pr_err("CPU: Failed to get ACK of power power up\n");
+ goto power_up_cleanup;
+ }
+
+ /* Release the CPU's isolation clamps */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
+
+ udelay(16);
+
+power_up_cleanup:
+
+
+ return rval;
+
+}
+/*========================================== L2 FUNCTIONS ========================================*/
+
+/*
+ * pm_L2_isolation_and_power_down - power off the L2 of @cluster:
+ * disable the HS RAM, power down the per-cluster L2 RAM banks, then
+ * drop stage-2 and stage-1 L2 logic power.
+ *
+ * NOTE(review): under PM_POWER_OFF_ONLY_DATARAM every call passes
+ * three arguments (syscon, reg, mask) to the two-argument
+ * pm_clear_bits_syscon_register() -- this path cannot compile if that
+ * macro is ever defined.  Confirm and drop the extra `syscon` args.
+ */
+static void pm_L2_isolation_and_power_down(int cluster)
+{
+
+ u32 mask = (0x1 << cluster);
+
+ /* Enable the chip select for the cluster */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+
+ /* Disable the hsram */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+
+ switch (cluster) {
+ case (0):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+
+#endif
+ break;
+ case (1):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ case (2):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ case (3):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ default:
+ pr_err("Illegal cluster: %d > 3\n", cluster);
+ break;
+ }
+
+ /* Power down stage 2 */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+
+ /* Power down stage 1 */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+
+}
+
+static int pm_L2_physical_connection_and_power_up(u32 cluster)
+{
+
+ bool success;
+ u32 mask = (0x1 << cluster);
+ int rval = 0;
+
+ /* Power up stage 1 */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+
+ /* Wait for the stage 1 power up to complete */
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster);
+ if (!success) {
+ pr_err("CPU: Failed to ack the L2 Stage 1 Power up\n");
+ rval = -PM_ERR_FAIL_L2ACK;
+ goto power_up_l2_cleanup;
+ }
+
+ /* Power on stage 2 */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+
+ /* Set the chip select */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+
+ /* Power up the snoop ram */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+
+ /* Wait for the stage 1 power up to complete */
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster);
+ if (!success) {
+ pr_err("CPU: failed to get the HSRAM power up ACK\n");
+ rval = -PM_ERR_FAIL_L2HSRAM;
+ goto power_up_l2_cleanup;
+ }
+
+ switch (cluster) {
+ case (0):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+
+#endif
+ break;
+ case (1):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ case (2):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ case (3):
+
+#ifdef PM_POWER_OFF_ONLY_DATARAM
+
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK1_MS_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK2_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(syscon,
+ NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
+ udelay(20);
+#else
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ udelay(20);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ udelay(20);
+#endif
+ break;
+ default:
+ pr_err("Illegal cluster: %d > 3\n", cluster);
+ break;
+ }
+
+ /* Clear the chip select */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+
+ /* Release the isolation clamps */
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, mask);
+
+ /* Turn the ACE bridge power on*/
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, mask);
+
+power_up_l2_cleanup:
+ return rval;
+}
+
+/*
+ * pm_L2_logical_powerup - bring a powered-down cluster's L2 complex back up.
+ * @cluster: cluster index (0-3)
+ * @cpu:     cpu number (unused in this function -- NOTE(review): confirm
+ *           whether it is still needed in the signature)
+ *
+ * Hardware handshake sequence; the statement order below is significant:
+ * hold the cluster in reset, physically power the L2 up, release the
+ * resets, re-enable the system counter, rejoin the dickens coherency
+ * domain, and finally drop ACINACTM so L2 traffic can flow.
+ *
+ * Returns 0 on success, or the negative -PM_ERR_* code propagated from
+ * the physical power-up / dickens steps.
+ */
+static int pm_L2_logical_powerup(u32 cluster, u32 cpu)
+{
+
+	/* Most syscon power registers carry one bit per cluster. */
+	u32 mask = (0x1 << cluster);
+	int rval = 0;
+	u32 cluster_mask;
+
+	/*
+	 * Per-core reset bits: four bits per cluster.  For cluster 0 the
+	 * low bit (the boot cpu) is deliberately left out of the mask.
+	 */
+	if (cluster == 0)
+		cluster_mask = 0xe;
+	else
+		cluster_mask = 0xf << (cluster * 4);
+
+	/* put the cluster into a cpu hold */
+	pm_or_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
+			cluster_to_poreset[cluster]);
+
+	/*
+	 * The key value has to be written before the CPU RST can be written.
+	 * (The key write is required before every protected syscon write
+	 * below, hence the repetition.)
+	 */
+	pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+	pm_or_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cluster_mask);
+
+	/* Hold the chip debug cluster */
+	pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+	pm_or_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
+
+	/* Hold the L2 cluster */
+	pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+	pm_or_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
+
+
+	/* Cluster physical power up */
+	rval = pm_L2_physical_connection_and_power_up(cluster);
+	if (rval)
+		goto exit_pm_L2_logical_powerup;
+
+	udelay(16);	/* settle delay -- NOTE(review): confirm against datasheet */
+
+	/* take the cluster out of a cpu hold */
+	pm_clear_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
+			cluster_to_poreset[cluster]);
+
+	udelay(64);	/* settle delay -- NOTE(review): confirm against datasheet */
+
+	/* Enable the system counter */
+	pm_or_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, mask);
+
+	/* Release the L2 cluster */
+	pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+	pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
+
+	/* Release the chip debug cluster */
+	pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+	pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
+
+	/* Power up the dickens (coherency interconnect) snoop/DVM domains */
+	rval = pm_dickens_logical_powerup(cluster);
+	if (rval)
+		goto exit_pm_L2_logical_powerup;
+
+	/* start L2 */
+	pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACINACTM, mask);
+
+exit_pm_L2_logical_powerup:
+
+	return rval;
+
+}
+
+#ifdef DEBUG_CPU_PM
+
+/*
+ * Dump every power-management syscon register to the kernel log.
+ *
+ * Debug-only helper (built under DEBUG_CPU_PM).  Register offsets now
+ * use the macros from lsi_power_management.h instead of duplicated raw
+ * hex values, so this dump can no longer drift from the register map.
+ * Also fixes the truncated "ISOLATEL2MIS" label (now ISOLATEL2MISC).
+ */
+void pm_debug_read_pwr_registers(void)
+{
+	u32 reg;
+
+	reg = readl(syscon + NCP_SYSCON_PWR_CLKEN);
+	pr_err("NCP_SYSCON_PWR_CLKEN: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_ACINACTM);
+	pr_err("NCP_SYSCON_PWR_ACINACTM: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CHIPSELECTEN);
+	pr_err("NCP_SYSCON_PWR_CHIPSELECTEN: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSREQ_TS);
+	pr_err("NCP_SYSCON_PWR_CSYSREQ_TS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSREQ_CNT);
+	pr_err("NCP_SYSCON_PWR_CSYSREQ_CNT: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSREQ_ATB);
+	pr_err("NCP_SYSCON_PWR_CSYSREQ_ATB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSREQ_APB);
+	pr_err("NCP_SYSCON_PWR_CSYSREQ_APB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL2LGCSTG1);
+	pr_err("NCP_SYSCON_PWR_PWRUPL2LGCSTG1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL2LGCSTG2);
+	pr_err("NCP_SYSCON_PWR_PWRUPL2LGCSTG2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL2HSRAM);
+	pr_err("NCP_SYSCON_PWR_PWRUPL2HSRAM: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_ACEPWRDNRQ);
+	pr_err("NCP_SYSCON_PWR_ACEPWRDNRQ: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_ISOLATEL2MISC);
+	pr_err("NCP_SYSCON_PWR_ISOLATEL2MISC: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK);
+	pr_err("NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK);
+	pr_err("NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_STANDBYWFIL2);
+	pr_err("NCP_SYSCON_PWR_STANDBYWFIL2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSACK_TS);
+	pr_err("NCP_SYSCON_PWR_CSYSACK_TS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVE_TS);
+	pr_err("NCP_SYSCON_PWR_CACTIVE_TS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSACK_CNT);
+	pr_err("NCP_SYSCON_PWR_CSYSACK_CNT: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVE_CNT);
+	pr_err("NCP_SYSCON_PWR_CACTIVE_CNT: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSACK_ATB);
+	pr_err("NCP_SYSCON_PWR_CSYSACK_ATB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVE_ATB);
+	pr_err("NCP_SYSCON_PWR_CACTIVE_ATB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CSYSACK_APB);
+	pr_err("NCP_SYSCON_PWR_CSYSACK_APB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVE_APB);
+	pr_err("NCP_SYSCON_PWR_CACTIVE_APB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_NACEPWRDNACK);
+	pr_err("NCP_SYSCON_PWR_NACEPWRDNACK: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVEM_EAGM);
+	pr_err("NCP_SYSCON_PWR_CACTIVEM_EAGM: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVEM_EAGS);
+	pr_err("NCP_SYSCON_PWR_CACTIVEM_EAGS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVES_EAGM);
+	pr_err("NCP_SYSCON_PWR_CACTIVES_EAGM: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_CACTIVES_EAGS);
+	pr_err("NCP_SYSCON_PWR_CACTIVES_EAGS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPCPUSTG1);
+	pr_err("NCP_SYSCON_PWR_PWRUPCPUSTG1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPCPUSTG2);
+	pr_err("NCP_SYSCON_PWR_PWRUPCPUSTG2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPCPURAM);
+	pr_err("NCP_SYSCON_PWR_PWRUPCPURAM: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_ISOLATECPU);
+	pr_err("NCP_SYSCON_PWR_ISOLATECPU: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK);
+	pr_err("NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_NPWRUPCPURAM_ACK);
+	pr_err("NCP_SYSCON_PWR_NPWRUPCPURAM_ACK: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_QACTIVE);
+	pr_err("NCP_SYSCON_PWR_QACTIVE: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_STANDBYWFI);
+	pr_err("NCP_SYSCON_PWR_STANDBYWFI: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_STANDBYWFE);
+	pr_err("NCP_SYSCON_PWR_STANDBYWFE: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_DBGNOPWRDWN);
+	pr_err("NCP_SYSCON_PWR_DBGNOPWRDWN: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_DBGPWRUPREQ);
+	pr_err("NCP_SYSCON_PWR_DBGPWRUPREQ: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_RESET_AXIS);
+	pr_err("NCP_SYSCON_RESET_AXIS: 0x%x\n", reg);
+	/* Second word of the RESET_AXIS register pair (base + 0x4). */
+	reg = readl(syscon + NCP_SYSCON_RESET_AXIS + 0x4);
+	pr_err("NCP_SYSCON_RESET_AXIS-WORD1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_RESET_CPU);
+	pr_err("NCP_SYSCON_RESET_CPU: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_HOLD_DBG);
+	pr_err("NCP_SYSCON_HOLD_DBG: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_HOLD_L2);
+	pr_err("NCP_SYSCON_HOLD_L2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_HOLD_CPU);
+	pr_err("NCP_SYSCON_HOLD_CPU: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWRUP_CPU_RST);
+	pr_err("NCP_SYSCON_PWRUP_CPU_RST: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_RESET_STATUS);
+	pr_err("NCP_SYSCON_RESET_STATUS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_RESET_CORE_STATUS);
+	pr_err("NCP_SYSCON_RESET_CORE_STATUS: 0x%x\n", reg);
+
+	/* Additional syscon registers, disabled by default; kept for reference. */
+#if 0
+	reg = readl(syscon + NCP_SYSCON_MCG_CSW_CPU);
+	pr_err("NCP_SYSCON_MCG_CSW_CPU: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MCG_CSW_SYS);
+	pr_err("NCP_SYSCON_MCG_CSW_SYS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MCG_DIV_CPU);
+	pr_err("NCP_SYSCON_MCG_DIV_CPU: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MCG_DIV_SYS);
+	pr_err("NCP_SYSCON_MCG_DIV_SYS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_CLKDEBUG);
+	pr_err("NCP_SYSCON_CLKDEBUG: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_EVENT_ENB);
+	pr_err("NCP_SYSCON_EVENT_ENB: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_CPU_FAST_INT);
+	pr_err("NCP_SYSCON_CPU_FAST_INT: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_GIC_DISABLE);
+	pr_err("NCP_SYSCON_GIC_DISABLE: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_CP15SDISABLE);
+	pr_err("NCP_SYSCON_CP15SDISABLE: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_LDO_CTL);
+	pr_err("NCP_SYSCON_LDO_CTL: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_SHWK_QOS);
+	pr_err("NCP_SYSCON_SHWK_QOS: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_FUSE_RTO);
+	pr_err("NCP_SYSCON_FUSE_RTO: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PFUSE);
+	pr_err("NCP_SYSCON_PFUSE: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_FUSE_STAT);
+	pr_err("NCP_SYSCON_FUSE_STAT: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_SCRATCH);
+	pr_err("NCP_SYSCON_SCRATCH: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI0);
+	pr_err("NCP_SYSCON_MASK_IPI0: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI1);
+	pr_err("NCP_SYSCON_MASK_IPI1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI2);
+	pr_err("NCP_SYSCON_MASK_IPI2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI3);
+	pr_err("NCP_SYSCON_MASK_IPI3: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI4);
+	pr_err("NCP_SYSCON_MASK_IPI4: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI5);
+	pr_err("NCP_SYSCON_MASK_IPI5: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI6);
+	pr_err("NCP_SYSCON_MASK_IPI6: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI7);
+	pr_err("NCP_SYSCON_MASK_IPI7: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI8);
+	pr_err("NCP_SYSCON_MASK_IPI8: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI9);
+	pr_err("NCP_SYSCON_MASK_IPI9: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI10);
+	pr_err("NCP_SYSCON_MASK_IPI10: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI11);
+	pr_err("NCP_SYSCON_MASK_IPI11: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI12);
+	pr_err("NCP_SYSCON_MASK_IPI12: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI13);
+	pr_err("NCP_SYSCON_MASK_IPI13: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI14);
+	pr_err("NCP_SYSCON_MASK_IPI14: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_MASK_IPI15);
+	pr_err("NCP_SYSCON_MASK_IPI15: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_SPARE0);
+	pr_err("NCP_SYSCON_SPARE0: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_STOP_CLK_CPU);
+	pr_err("NCP_SYSCON_STOP_CLK_CPU: 0x%x\n", reg);
+#endif
+}
+
+
+/*
+ * Dump the per-cluster L2 HSRAM power-up registers to the kernel log.
+ *
+ * Fixes: (1) the original body was missing its closing brace, so the
+ * next function definition nested inside this one and the file failed
+ * to compile whenever DEBUG_CPU_PM was defined; (2) the raw hex offsets
+ * are replaced with the matching macros from lsi_power_management.h.
+ */
+void pm_dump_L2_registers(void)
+{
+	u32 reg;
+
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2);
+	pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1);
+	pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0);
+	pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2);
+	pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1);
+	pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0);
+	pr_err("NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2);
+	pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1);
+	pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0);
+	pr_err("NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2);
+	pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1);
+	pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1: 0x%x\n", reg);
+	reg = readl(syscon + NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0);
+	pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0: 0x%x\n", reg);
+}
+/*
+ * Dump the dickens (coherency interconnect) HN-F snoop-domain and MN
+ * DVM-domain control registers.
+ *
+ * Fix: the temporary ioremap() mapping was leaked on the success path;
+ * it is now released with iounmap() before returning.
+ */
+void pm_dump_dickens(void)
+{
+	void __iomem *dickens;
+	u32 status;
+	u32 i;
+
+	dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
+	if (dickens == NULL) {
+		pr_err("DICKENS: Failed to map the dickens registers\n");
+		return;
+	}
+
+	/* Each node occupies a 64 KiB (0x10000) window. */
+	for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) {
+		status = readl(
+				dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
+				+ DKN_HNF_SNOOP_DOMAIN_CTL);
+		udelay(1);
+		pr_err("DKN_HNF_SNOOP_DOMAIN_CTL[%d]: 0x%x\n", i, status);
+	}
+
+	status = readl(
+			dickens + (0x10000 * DKN_DVM_DOMAIN_OFFSET)
+			+ DKN_MN_DVM_DOMAIN_CTL);
+
+	pr_err("DKN_MN_DVM_DOMAIN_CTL: 0x%x\n", status);
+
+	/* Release the temporary mapping (was leaked in the original). */
+	iounmap(dickens);
+}
+
+#endif
diff --git a/arch/arm/mach-axxia/lsi_power_management.h b/arch/arm/mach-axxia/lsi_power_management.h
new file mode 100644
index 0000000..f967a37
--- /dev/null
+++ b/arch/arm/mach-axxia/lsi_power_management.h
@@ -0,0 +1,192 @@
+/*
+ * lsi_power_management.h
+ *
+ * Register map and interfaces for LSI Axxia CPU/L2 power management:
+ * syscon register offsets, dickens (coherency interconnect) node
+ * offsets, and the hotplug/power-up entry points implemented in
+ * lsi_power_management.c.
+ *
+ * Created on: Jun 23, 2014
+ *     Author: z8cpaul
+ */
+
+#ifndef LSI_POWER_MANAGEMENT_H_
+#define LSI_POWER_MANAGEMENT_H_
+
+
+/* Syscon clock-generation, fuse and IPI-mask block (offsets from the
+ * syscon base mapping). */
+#define NCP_SYSCON_MCG_CSW_CPU (0x00000000)
+#define NCP_SYSCON_MCG_CSW_SYS (0x00000004)
+#define NCP_SYSCON_MCG_DIV_CPU (0x00000008)
+#define NCP_SYSCON_MCG_DIV_SYS (0x0000000c)
+#define NCP_SYSCON_CLKDEBUG (0x00000010)
+#define NCP_SYSCON_EVENT_ENB (0x00000014)
+#define NCP_SYSCON_CPU_FAST_INT (0x00000018)
+#define NCP_SYSCON_GIC_DISABLE (0x0000001c)
+#define NCP_SYSCON_CP15SDISABLE (0x00000020)
+#define NCP_SYSCON_LRSTDISABLE (0x00000024)
+#define NCP_SYSCON_LDO_CTL (0x00000028)
+#define NCP_SYSCON_SHWK_QOS (0x0000002c)
+#define NCP_SYSCON_FUSE_RTO (0x00000030)
+#define NCP_SYSCON_PFUSE (0x00000034)
+#define NCP_SYSCON_FUSE_STAT (0x00000038)
+#define NCP_SYSCON_SCRATCH (0x0000003c)
+#define NCP_SYSCON_MASK_IPI0 (0x00000040)
+#define NCP_SYSCON_MASK_IPI1 (0x00000044)
+#define NCP_SYSCON_MASK_IPI2 (0x00000048)
+#define NCP_SYSCON_MASK_IPI3 (0x0000004c)
+#define NCP_SYSCON_MASK_IPI4 (0x00000050)
+#define NCP_SYSCON_MASK_IPI5 (0x00000054)
+#define NCP_SYSCON_MASK_IPI6 (0x00000058)
+#define NCP_SYSCON_MASK_IPI7 (0x0000005c)
+#define NCP_SYSCON_MASK_IPI8 (0x00000060)
+#define NCP_SYSCON_MASK_IPI9 (0x00000064)
+#define NCP_SYSCON_MASK_IPI10 (0x00000068)
+#define NCP_SYSCON_MASK_IPI11 (0x0000006c)
+#define NCP_SYSCON_MASK_IPI12 (0x00000070)
+#define NCP_SYSCON_MASK_IPI13 (0x00000074)
+#define NCP_SYSCON_MASK_IPI14 (0x00000078)
+#define NCP_SYSCON_MASK_IPI15 (0x0000007c)
+#define NCP_SYSCON_MASK_IPI16 (0x00000080)
+#define NCP_SYSCON_MASK_IPI17 (0x00000084)
+#define NCP_SYSCON_MASK_IPI18 (0x00000088)
+#define NCP_SYSCON_SPARE0 (0x0000008c)
+#define NCP_SYSCON_STOP_CLK_CPU (0x00000090)
+
+
+/* Reset status (read-only from the PM code's point of view --
+ * NOTE(review): confirm against the chip documentation). */
+#define NCP_SYSCON_RESET_STATUS (0x00000100)
+#define NCP_SYSCON_RESET_CORE_STATUS (0x00000108)
+
+/* Reset/hold control block.  NCP_SYSCON_KEY must be written with
+ * VALID_KEY_VALUE immediately before each write to the protected
+ * registers below (see lsi_power_management.c). */
+#define NCP_SYSCON_KEY (0x00001000)
+#define NCP_SYSCON_RESET_CTL (0x00001008)
+#define NCP_SYSCON_RESET_CPU (0x0000100c)
+#define NCP_SYSCON_HOLD_CPU (0x00001010)
+#define NCP_SYSCON_HOLD_PTM (0x00001014)
+#define NCP_SYSCON_HOLD_L2 (0x00001018)
+#define NCP_SYSCON_HOLD_DBG (0x0000101c)
+
+#define NCP_SYSCON_PWRUP_CPU_RST (0x00001030)
+
+#define NCP_SYSCON_RESET_AXIS (0x00001040)
+#define NCP_SYSCON_RESET_AXIS_ACCESS_SIZE (0x00000008)
+
+/* Power-control block: clamps, clock enables, stage power-up controls
+ * and their (active-low "N...ACK") acknowledge registers.  Most carry
+ * one bit per cluster. */
+#define NCP_SYSCON_PWR_CLKEN (0x00001400)
+#define NCP_SYSCON_ENABLE_CLKEN_SET (0x00001404)
+#define NCP_SYSCON_PWR_ACINACTM (0x00001408)
+#define NCP_SYSCON_PWR_CHIPSELECTEN (0x0000140c)
+#define NCP_SYSCON_PWR_CSYSREQ_TS (0x00001410)
+#define NCP_SYSCON_PWR_CSYSREQ_CNT (0x00001414)
+#define NCP_SYSCON_PWR_CSYSREQ_ATB (0x00001418)
+#define NCP_SYSCON_PWR_CSYSREQ_APB (0x0000141c)
+#define NCP_SYSCON_PWR_PWRUPL2LGCSTG1 (0x00001420)
+#define NCP_SYSCON_PWR_PWRUPL2LGCSTG2 (0x00001424)
+#define NCP_SYSCON_PWR_PWRUPL2HSRAM (0x00001428)
+#define NCP_SYSCON_PWR_ACEPWRDNRQ (0x0000142c)
+#define NCP_SYSCON_PWR_ISOLATEL2MISC (0x00001430)
+#define NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK (0x00001438)
+#define NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK (0x0000143c)
+#define NCP_SYSCON_PWR_STANDBYWFIL2 (0x00001440)
+#define NCP_SYSCON_PWR_CSYSACK_TS (0x00001444)
+#define NCP_SYSCON_PWR_CACTIVE_TS (0x00001448)
+#define NCP_SYSCON_PWR_CSYSACK_CNT (0x0000144c)
+#define NCP_SYSCON_PWR_CACTIVE_CNT (0x00001450)
+#define NCP_SYSCON_PWR_CSYSACK_ATB (0x00001454)
+#define NCP_SYSCON_PWR_CACTIVE_ATB (0x00001458)
+#define NCP_SYSCON_PWR_CSYSACK_APB (0x0000145c)
+#define NCP_SYSCON_PWR_CACTIVE_APB (0x00001460)
+#define NCP_SYSCON_PWR_NACEPWRDNACK (0x00001464)
+#define NCP_SYSCON_PWR_CACTIVEM_EAGM (0x00001468)
+#define NCP_SYSCON_PWR_CACTIVEM_EAGS (0x0000146c)
+#define NCP_SYSCON_PWR_CACTIVES_EAGM (0x00001470)
+#define NCP_SYSCON_PWR_CACTIVES_EAGS (0x00001474)
+#define NCP_SYSCON_PWR_PWRUPCPUSTG1 (0x00001480)
+#define NCP_SYSCON_PWR_PWRUPCPUSTG2 (0x00001484)
+#define NCP_SYSCON_PWR_PWRUPCPURAM (0x00001488)
+#define NCP_SYSCON_PWR_ISOLATECPU (0x0000148c)
+#define NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK (0x00001490)
+#define NCP_SYSCON_PWR_NPWRUPCPURAM_ACK (0x00001494)
+#define NCP_SYSCON_PWR_QACTIVE (0x00001498)
+#define NCP_SYSCON_PWR_STANDBYWFI (0x0000149c)
+#define NCP_SYSCON_PWR_STANDBYWFE (0x000014a0)
+#define NCP_SYSCON_PWR_DBGNOPWRDWN (0x000014a4)
+#define NCP_SYSCON_PWR_DBGPWRUPREQ (0x000014a8)
+/* Per-cluster L2 HSRAM power-up controls: three registers (RAM2/1/0)
+ * for each of the four clusters (L20..L23). */
+#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2 (0x00001580)
+#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1 (0x00001584)
+#define NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0 (0x00001588)
+#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2 (0x0000158c)
+#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1 (0x00001590)
+#define NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0 (0x00001594)
+#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2 (0x00001598)
+#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1 (0x0000159c)
+#define NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0 (0x000015a0)
+#define NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2 (0x000015a4)
+#define NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1 (0x000015a8)
+#define NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0 (0x000015ac)
+
+/* HSRAM bank bit-masks, used when banks are powered individually
+ * (PM_POWER_OFF_ONLY_DATARAM paths in lsi_power_management.c). */
+#define RAM_BANK0_MASK (0x0FFF0000)
+#define RAM_BANK1_LS_MASK (0xF0000000)
+#define RAM_BANK1_MS_MASK (0x000000FF)
+#define RAM_BANK2_MASK (0x000FFF00)
+#define RAM_BANK3_MASK (0xFFF00000)
+#define RAM_ALL_MASK (0xFFFFFFFF)
+
+/* DICKENS REGISTERS (Miscellaneous Node) */
+#define DKN_MN_NODE_ID (0x0)
+#define DKN_DVM_DOMAIN_OFFSET (0x0)
+#define DKN_MN_DVM_DOMAIN_CTL (0x200)
+#define DKN_MN_DVM_DOMAIN_CTL_SET (0x210)
+#define DKN_MN_DVM_DOMAIN_CTL_CLR (0x220)
+
+/* DICKENS HN-F (Fully-coherent Home Node) */
+#define DKN_HNF_NODE_ID (0x20)
+#define DKN_HNF_TOTAL_NODES (0x8)
+#define DKN_HNF_SNOOP_DOMAIN_CTL (0x200)
+#define DKN_HNF_SNOOP_DOMAIN_CTL_SET (0x210)
+#define DKN_HNF_SNOOP_DOMAIN_CTL_CLR (0x220)
+
+/* DICKENS clustid to Node */
+#define DKN_CLUSTER0_NODE (1)
+#define DKN_CLUSTER1_NODE (9)
+#define DKN_CLUSTER2_NODE (11)
+#define DKN_CLUSTER3_NODE (19)
+
+/* PO RESET cluster id to bit */
+#define PORESET_CLUSTER0 (0x10000)
+#define PORESET_CLUSTER1 (0x20000)
+#define PORESET_CLUSTER2 (0x40000)
+#define PORESET_CLUSTER3 (0x80000)
+
+/* IPI Masks (one nibble-spread bit pattern per IPI line) */
+#define IPI0_MASK (0x1111)
+#define IPI1_MASK (0x2222)
+#define IPI2_MASK (0x4444)
+#define IPI3_MASK (0x8888)
+
+/* SYSCON KEY Value -- must be written to NCP_SYSCON_KEY before each
+ * protected register write. */
+#define VALID_KEY_VALUE (0xAB)
+
+#define MAX_NUM_CLUSTERS (4)
+#define CORES_PER_CLUSTER (4)
+#define MAX_IPI (19)
+#define MAX_CPUS (MAX_NUM_CLUSTERS * CORES_PER_CLUSTER)
+
+/* cpu/cluster pair passed to the logical-die request functions. */
+typedef struct {
+	u32 cpu;
+	u32 cluster;
+} pm_data;
+
+
+/* Entry points implemented in lsi_power_management.c. */
+void pm_cpu_shutdown(u32 cpu);
+int pm_cpu_powerup(u32 cpu);
+void pm_debug_read_pwr_registers(void);
+void pm_dump_L2_registers(void);
+int pm_cpu_logical_die(pm_data *pm_request);
+int pm_cpul2_logical_die(pm_data *pm_request);
+unsigned long pm_get_powered_down_cpu(void);
+bool pm_cpu_last_of_cluster(u32 cpu);
+void pm_dump_dickens(void);
+void pm_init_cpu(u32 cpu);
+void pm_cpu_logical_powerup(void);
+void pm_cluster_logical_powerup(void);
+bool pm_cpu_active(u32 cpu);
+void pm_init_syscon(void);
+/* Shared state defined in lsi_power_management.c. */
+extern bool pm_in_progress[];
+extern bool cluster_power_up[];
+extern u32 pm_cpu_powered_down;
+
+
+#endif /* LSI_POWER_MANAGEMENT_H_ */
diff --git a/arch/arm/mach-axxia/perf_event_memc.c b/arch/arm/mach-axxia/perf_event_memc.c
new file mode 100644
index 0000000..7da5926
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_memc.c
@@ -0,0 +1,152 @@
+/*
+ * arch/arm/mach-axxia/perf_event_memc.c
+ * included from arch/arm/mach-axxia/perf_event_platform.c
+ *
+ * Support for the LSI Axxia boards based on ARM cores.
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "perf_event_memc.h"
+
+/*
+ * One-time setup of the memory-controller SMON performance-monitor blocks.
+ *
+ * Initializes the four SMON descriptors (two DDR controllers accessed
+ * over the NCP config ring, two ELMs accessed via memory mapping), then
+ * sets the SMON_ENABLE bit in each DDR controller's control register
+ * with a read-modify-write over the config ring.
+ *
+ * NOTE(review): the ncr_read()/ncr_write() return values are not
+ * checked; if the first read fails, 'config' is used uninitialized --
+ * confirm whether these accesses can fail at this point in boot.
+ */
+static void memc_startup_init(void)
+{
+	uint32_t config;
+
+	smon_init_ncp(&ddrc0_smon, DDRC0, DDRC_PERF, DDRC_SMON);
+	smon_init_ncp(&ddrc1_smon, DDRC1, DDRC_PERF, DDRC_SMON);
+	smon_init_mem(&elm0_smon, ELM0, ELM_SMON);
+	smon_init_mem(&elm1_smon, ELM1, ELM_SMON);
+
+	/* enable SMC SMON registers */
+	ncr_read(NCP_REGION_ID(DDRC0, DDRC_CTRL), CTRL_SMON,
+			REG_SZ, &config);
+	config |= SMON_ENABLE;
+	ncr_write(NCP_REGION_ID(DDRC0, DDRC_CTRL), CTRL_SMON,
+			REG_SZ, &config);
+
+	ncr_read(NCP_REGION_ID(DDRC1, DDRC_CTRL), CTRL_SMON,
+			REG_SZ, &config);
+	config |= SMON_ENABLE;
+	ncr_write(NCP_REGION_ID(DDRC1, DDRC_CTRL), CTRL_SMON,
+			REG_SZ, &config);
+}
+
+/* Per-event init hook: no per-event state to validate or set up for the
+ * memory-controller SMONs; always reports success (0). */
+static uint32_t memc_pmu_event_init(uint32_t event, struct perf_event *pevent)
+{
+	return 0;
+}
+
+/*
+ * Allocate and start one SMON counter.
+ *
+ * Returns 0 on success, otherwise the first non-zero error code from
+ * smon_allocate() or smon_start().
+ */
+static uint32_t memc_smon_alloc_start(struct smon_s *smon, uint32_t idx)
+{
+	uint32_t ret;
+
+	ret = smon_allocate(smon, idx);
+	if (ret != 0)
+		return ret;
+
+	return smon_start(smon, idx);
+}
+
+/*
+ * Add (allocate and start) a memory-controller event.  The event code
+ * selects the SMON block by range; the offset within the range selects
+ * the counter.  Codes outside every range are accepted and do nothing,
+ * matching the original behaviour.  (Refactored: the four copy-pasted
+ * allocate+start branches now share memc_smon_alloc_start().)
+ */
+static uint32_t memc_pmu_event_add(uint32_t ev, struct perf_event *pevent)
+{
+	if (ev >= DDRC0_OFFSET && ev <= DDRC0_SMON_MAX)
+		return memc_smon_alloc_start(&ddrc0_smon, ev - DDRC0_OFFSET);
+
+	if (ev >= DDRC1_OFFSET && ev <= DDRC1_SMON_MAX)
+		return memc_smon_alloc_start(&ddrc1_smon, ev - DDRC1_OFFSET);
+
+	if (ev >= ELM0_OFFSET && ev <= ELM0_SMON_MAX)
+		return memc_smon_alloc_start(&elm0_smon, ev - ELM0_OFFSET);
+
+	if (ev >= ELM1_OFFSET && ev <= ELM1_SMON_MAX)
+		return memc_smon_alloc_start(&elm1_smon, ev - ELM1_OFFSET);
+
+	return 0;
+}
+
+/*
+ * Return counter update.
+ */
+/*
+ * Read the current counter update for a memory-controller event.
+ * Event codes outside every SMON range, and counters reporting
+ * -ENOEVENT, both yield 0.
+ */
+static uint32_t memc_pmu_event_read(uint32_t ev, struct perf_event *pevent,
+		int flags)
+{
+	uint32_t delta;
+
+	if (ev >= DDRC0_OFFSET && ev <= DDRC0_SMON_MAX)
+		delta = smon_read(&ddrc0_smon, ev - DDRC0_OFFSET);
+	else if (ev >= DDRC1_OFFSET && ev <= DDRC1_SMON_MAX)
+		delta = smon_read(&ddrc1_smon, ev - DDRC1_OFFSET);
+	else if (ev >= ELM0_OFFSET && ev <= ELM0_SMON_MAX)
+		delta = smon_read(&elm0_smon, ev - ELM0_OFFSET);
+	else if (ev >= ELM1_OFFSET && ev <= ELM1_SMON_MAX)
+		delta = smon_read(&elm1_smon, ev - ELM1_OFFSET);
+	else
+		return 0;
+
+	/* smon_read() signals "no event" as -ENOEVENT; map that to 0. */
+	return (delta == (uint32_t)-ENOEVENT) ? 0 : delta;
+}
+
+/*
+ * Remove event and return counter update.
+ */
+/*
+ * Remove a memory-controller event: take the final counter reading,
+ * then release the SMON slot.  Unknown event codes, and counters
+ * reporting -ENOEVENT, both yield 0.
+ */
+static uint32_t memc_pmu_event_del(uint32_t ev, struct perf_event *pevent,
+		int flags)
+{
+	struct smon_s *smon = NULL;
+	uint32_t base = 0;
+	uint32_t final_count;
+
+	/* Resolve the owning SMON block and the range base for 'ev'. */
+	if (ev >= DDRC0_OFFSET && ev <= DDRC0_SMON_MAX) {
+		smon = &ddrc0_smon;
+		base = DDRC0_OFFSET;
+	} else if (ev >= DDRC1_OFFSET && ev <= DDRC1_SMON_MAX) {
+		smon = &ddrc1_smon;
+		base = DDRC1_OFFSET;
+	} else if (ev >= ELM0_OFFSET && ev <= ELM0_SMON_MAX) {
+		smon = &elm0_smon;
+		base = ELM0_OFFSET;
+	} else if (ev >= ELM1_OFFSET && ev <= ELM1_SMON_MAX) {
+		smon = &elm1_smon;
+		base = ELM1_OFFSET;
+	}
+
+	if (smon == NULL)
+		return 0;
+
+	final_count = smon_read(smon, ev - base);
+	smon_deallocate(smon, ev - base);
+
+	return (final_count == (uint32_t)-ENOEVENT) ? 0 : final_count;
+}
diff --git a/arch/arm/mach-axxia/perf_event_memc.h b/arch/arm/mach-axxia/perf_event_memc.h
new file mode 100644
index 0000000..6e9ba519
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_memc.h
@@ -0,0 +1,67 @@
+/*
+ * arch/arm/mach-axxia/perf_event_memc.h
+ * included from arch/arm/mach-axxia/perf_event_memc.c
+ *
+ * Support for the LSI Axxia boards based on ARM cores.
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM__ARCH_AXXIA_PERF_EVENT_MEMC_H
+#define __ASM__ARCH_AXXIA_PERF_EVENT_MEMC_H
+
+/* Event-code windows: each DDR controller / ELM owns a 0x100 range;
+ * the offset within the range selects the SMON counter. */
+#define DDRC0_OFFSET 0x00
+#define DDRC0_SMON_MAX (DDRC0_OFFSET + 22)
+#define DDRC1_OFFSET 0x100
+#define DDRC1_SMON_MAX (DDRC1_OFFSET + 22)
+
+#define ELM0_OFFSET 0x200
+#define ELM0_SMON_MAX (ELM0_OFFSET + 15)
+#define ELM1_OFFSET 0x300
+#define ELM1_SMON_MAX (ELM1_OFFSET + 15)
+
+/* Node (NCP config-ring node ids of the two DDR controllers) */
+#define DDRC0 0x0f
+#define DDRC1 0x22
+/* Target */
+#define DDRC_CTRL 0x00
+#define DDRC_PERF 0x02
+/* Address */
+#define CTRL_SMON 0x1fc
+
+/* SMON register offset differs between silicon revisions.
+ * NOTE(review): if neither AXM55XX_R1 nor AXM55XX_R2 is defined,
+ * DDRC_SMON is left undefined and perf_event_memc.c fails to build
+ * with an unhelpful error -- consider adding an explicit #error. */
+#ifdef AXM55XX_R1
+#define DDRC_SMON 0x40
+#endif
+#ifdef AXM55XX_R2
+#define DDRC_SMON 0xA0
+#endif
+
+/* Settings */
+#define SMON_ENABLE 0x20000000
+
+/* Base Address (physical, >32-bit; ELMs are memory-mapped) */
+#define ELM0 0x2010060000
+#define ELM1 0x2010070000
+/* SMON Offset (in 32-bit words) */
+#define ELM_SMON (0x300/4)
+
+/* SMON state.  NOTE(review): these are definitions, not extern
+ * declarations -- safe only because this header is included by exactly
+ * one translation unit (perf_event_memc.c, via perf_event_platform.c). */
+struct smon_s ddrc0_smon;
+struct smon_s ddrc1_smon;
+struct smon_s elm0_smon;
+struct smon_s elm1_smon;
+
+#endif
diff --git a/arch/arm/mach-axxia/perf_event_pcx.c b/arch/arm/mach-axxia/perf_event_pcx.c
new file mode 100644
index 0000000..7896bb9
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_pcx.c
@@ -0,0 +1,52 @@
+/*
+ * arch/arm/mach-axxia/perf_event_pcx.c
+ * included from arch/arm/mach-axxia/perf_event_platform.c
+ *
+ * Support for the LSI Axxia boards based on ARM cores.
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+/*
+ * Generic PCX
+ */
+
+/* Start-of-day init for the PCX block: nothing to set up (stub). */
+static void pcx_startup_init(void)
+{
+}
+
+/* Per-event init hook for PCX events: no validation needed (stub, 0). */
+static uint32_t pcx_pmu_event_init(uint32_t ev, struct perf_event *event)
+{
+	return 0;
+}
+
+/* Add/start hook for PCX events: no hardware counters wired up yet (stub). */
+static uint32_t pcx_pmu_event_add(uint32_t ev, struct perf_event *event)
+{
+	return 0;
+}
+
+/* Read hook for PCX events: always reports a zero counter delta (stub). */
+static uint32_t pcx_pmu_event_read(uint32_t ev, struct perf_event *event,
+		int flags)
+{
+	return 0;
+}
+
+/* Delete hook for PCX events: nothing to stop or free (stub, returns 0). */
+static uint32_t pcx_pmu_event_del(uint32_t ev, struct perf_event *event,
+		int flags)
+{
+	return 0;
+}
diff --git a/arch/arm/mach-axxia/perf_event_platform.c b/arch/arm/mach-axxia/perf_event_platform.c
new file mode 100644
index 0000000..538fd1a
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_platform.c
@@ -0,0 +1,275 @@
+/*
+ * arch/arm/mach-axxia/perf_event_platform.c
+ *
+ * Support for the LSI Axxia boards based on ARM cores.
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <linux/bitmap.h>
+#include <linux/cpu_pm.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include <asm/cputype.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#include <linux/kthread.h>
+#include <linux/sched.h>
+
+#include <linux/cpu.h>
+#include <linux/reboot.h>
+#include <linux/syscore_ops.h>
+
+#include <linux/proc_fs.h>
+
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <../../../drivers/misc/lsi-ncr.h>
+
+#include "perf_event_platform.h"
+
+#include "smon.h"
+
+/*
+ * Include code for individual block support
+ */
+
+#include "perf_event_pcx.c"
+#include "perf_event_vp.c"
+#include "perf_event_memc.c"
+
+/*
+ * General platform perf code, muxed out to individual blocks
+ */
+
+/*
+ * perf core ->event_idx callback. Returning 0 tells the core that this
+ * PMU exposes no user-space-readable counter index (no rdpmc-style access).
+ */
+int platform_pmu_event_idx(struct perf_event *event)
+{
+ return 0;
+}
+
+/*
+ * perf core ->event_init callback for the AXM55xx platform PMU.
+ * Validates the raw event code against this PMU's event ranges, sets up
+ * the hw state, then dispatches to the owning block (VP/PCX/MEMC).
+ * Returns -ENOENT for events that do not belong to this PMU so the
+ * perf core can offer them to other PMUs.
+ */
+int platform_pmu_event_init(struct perf_event *event)
+{
+	uint64_t ev = event->attr.config;
+
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	if ((ev < AXM_55XX_PLATFORM_BASE) || (ev > AXM_55XX_PLATFORM_MAX))
+		return -ENOENT;
+
+	event->hw.config = ev - AXM_55XX_PLATFORM_BASE;
+	event->hw.idx = -1;
+	event->hw.config_base = 1;
+
+	/* The SMON counters cannot filter by privilege mode. */
+	if (event->attr.exclude_user)
+		return -EOPNOTSUPP;
+	if (event->attr.exclude_kernel)
+		return -EOPNOTSUPP;
+	if (event->attr.exclude_idle)
+		return -EOPNOTSUPP;
+
+	event->hw.last_period = event->hw.sample_period;
+	local64_set(&event->hw.period_left, event->hw.last_period);
+	local64_set(&event->count, 0);
+
+	if (ev >= AXM_55XX_VP_BASE && ev <= AXM_55XX_VP_MAX)
+		vp_pmu_event_init(ev - AXM_55XX_VP_BASE, event);
+	else if (ev >= AXM_55XX_PCX_BASE && ev <= AXM_55XX_PCX_MAX)
+		pcx_pmu_event_init(ev - AXM_55XX_PCX_BASE, event);
+	else if (ev >= AXM_55XX_MEMC_BASE && ev <= AXM_55XX_MEMC_MAX)
+		memc_pmu_event_init(ev - AXM_55XX_MEMC_BASE, event);
+	else {
+		/*
+		 * Inside the platform range but in a gap between blocks
+		 * (e.g. 0x2000-0x3fff above VP): reject instead of
+		 * silently accepting an event no block owns.
+		 */
+		pr_info("Platform perf, undefined event, %llu\n", ev);
+		return -ENOENT;
+	}
+
+	return 0;
+}
+
+/*
+ * perf core ->add callback: route the event to whichever block
+ * (MEMC/PCX/VP) owns its event-code range. Ranges are disjoint.
+ */
+static int platform_pmu_event_add(struct perf_event *event, int flags)
+{
+	uint64_t config = event->attr.config;
+
+	if (config >= AXM_55XX_MEMC_BASE && config <= AXM_55XX_MEMC_MAX)
+		memc_pmu_event_add(config - AXM_55XX_MEMC_BASE, event);
+	else if (config >= AXM_55XX_PCX_BASE && config <= AXM_55XX_PCX_MAX)
+		pcx_pmu_event_add(config - AXM_55XX_PCX_BASE, event);
+	else if (config >= AXM_55XX_VP_BASE && config <= AXM_55XX_VP_MAX)
+		vp_pmu_event_add(config - AXM_55XX_VP_BASE, event);
+
+	return 0;
+}
+
+/*
+ * perf core ->del callback: stop the event in its owning block and fold
+ * the final count it reports into event->count. Events outside every
+ * block's range have their count cleared, matching the original code.
+ */
+static void platform_pmu_event_del(struct perf_event *event, int flags)
+{
+	uint64_t config = event->attr.config;
+	uint32_t count;
+
+	if (config >= AXM_55XX_VP_BASE && config <= AXM_55XX_VP_MAX)
+		count = vp_pmu_event_del(config - AXM_55XX_VP_BASE, event, flags);
+	else if (config >= AXM_55XX_PCX_BASE && config <= AXM_55XX_PCX_MAX)
+		count = pcx_pmu_event_del(config - AXM_55XX_PCX_BASE, event, flags);
+	else if (config >= AXM_55XX_MEMC_BASE && config <= AXM_55XX_MEMC_MAX)
+		count = memc_pmu_event_del(config - AXM_55XX_MEMC_BASE, event, flags);
+	else {
+		local64_set(&event->count, 0);
+		return;
+	}
+
+	local64_add(count, &event->count);
+}
+
+/* No-op: block counters run continuously; start/stop is not implemented. */
+static void platform_pmu_event_start(struct perf_event *event, int flags)
+{
+}
+
+/* No-op counterpart to platform_pmu_event_start(). */
+static void platform_pmu_event_stop(struct perf_event *event, int flags)
+{
+}
+
+/*
+ * perf core ->read callback: ask the owning block for the count delta
+ * since the last read and accumulate it into event->count. Events
+ * outside every block's range are left untouched.
+ */
+static void platform_pmu_event_read(struct perf_event *event)
+{
+	uint64_t config = event->attr.config;
+	uint32_t delta;
+
+	if (config >= AXM_55XX_VP_BASE && config <= AXM_55XX_VP_MAX)
+		delta = vp_pmu_event_read(config - AXM_55XX_VP_BASE, event, 0);
+	else if (config >= AXM_55XX_PCX_BASE && config <= AXM_55XX_PCX_MAX)
+		delta = pcx_pmu_event_read(config - AXM_55XX_PCX_BASE, event, 0);
+	else if (config >= AXM_55XX_MEMC_BASE && config <= AXM_55XX_MEMC_MAX)
+		delta = memc_pmu_event_read(config - AXM_55XX_MEMC_BASE, event, 0);
+	else
+		return;
+
+	local64_add(delta, &event->count);
+}
+
+/*
+ * Device
+ */
+
+/* Release callback for axmperf_device; nothing to free, just log. */
+static void axmperf_device_release(struct device *dev)
+{
+ pr_warn("AXM55xxPlatformPerf release device\n");
+}
+
+/*
+ * Static platform device for the platform PMU.
+ * NOTE(review): its register/unregister lifecycle is asymmetric in this
+ * file (no platform_device_register() call is visible) — verify intent.
+ */
+static struct platform_device axmperf_device = {
+ .name = "AXM55xxPlatformPerf",
+ .id = 0,
+ .dev = {
+ .release = axmperf_device_release,
+ },
+};
+
+/*
+ * Driver
+ */
+
+/* Maximum length (including NUL) of the registered PMU name. */
+#define PLATFORM_PMU_NAME_LEN 32
+
+/* Platform PMU instance: the perf core handle plus its backing name. */
+struct lsi_platform_pmu {
+ struct pmu pmu;
+ char name[PLATFORM_PMU_NAME_LEN];
+};
+
+/*
+ * Probe: allocate the platform PMU, register it with the perf core and
+ * run one-time per-block setup. On registration failure the allocation
+ * is freed and block setup is skipped (the original leaked it and ran
+ * the setup anyway).
+ */
+static int axmperf_probe(struct platform_device *dev)
+{
+	int ret;
+	struct lsi_platform_pmu *axm_pmu;
+
+	axm_pmu = kzalloc(sizeof(*axm_pmu), GFP_KERNEL);
+	if (!axm_pmu) {
+		pr_warn("Failed platform perf memory alloc!\n");
+		return -ENOMEM;
+	}
+
+	axm_pmu->pmu = (struct pmu) {
+		.attr_groups = NULL,
+		.event_init = platform_pmu_event_init,
+		.add = platform_pmu_event_add,
+		.del = platform_pmu_event_del,
+		.start = platform_pmu_event_start,
+		.stop = platform_pmu_event_stop,
+		.read = platform_pmu_event_read,
+		.event_idx = platform_pmu_event_idx,
+	};
+
+	/*
+	 * NOTE(review): perf tooling generally expects PMU names without
+	 * spaces (the name becomes a sysfs directory) — consider renaming.
+	 * Bounded copy instead of bare sprintf().
+	 */
+	snprintf(axm_pmu->name, sizeof(axm_pmu->name), "LSI AXM55xx Platform");
+
+	ret = perf_pmu_register(&axm_pmu->pmu, axm_pmu->name, PERF_TYPE_RAW);
+	if (ret) {
+		pr_info("axxia platform perf failed\n");
+		kfree(axm_pmu);	/* was leaked on this path */
+		return ret;
+	}
+
+	pr_info("axxia platform perf enabled\n");
+
+	vp_startup_init();
+	pcx_startup_init();
+	memc_startup_init();
+
+	return 0;
+}
+
+/* Device-tree match table: binds to "lsi,axm-platformperf" nodes. */
+static const struct of_device_id lsi_platformperf_match[] = {
+ { .compatible = "lsi,axm-platformperf", },
+ {},
+};
+
+/* Platform driver; matches by OF table or by the literal device name. */
+static struct platform_driver axmperf_driver = {
+ .driver = {
+ .name = "AXM55xxPlatformPerf",
+ .of_match_table = lsi_platformperf_match,
+ .owner = THIS_MODULE,
+ },
+ .probe = axmperf_probe,
+};
+
+/*
+ * Module init: register the platform driver. The original discarded
+ * the return value and always reported success; propagate it instead
+ * so a failed registration fails the module load.
+ */
+static int __init axmperf_init(void)
+{
+	return platform_driver_register(&axmperf_driver);
+}
+
+/*
+ * Module exit: unregister the driver.
+ * The original also called platform_device_unregister(&axmperf_device),
+ * but nothing in this file ever registers that device; unregistering an
+ * unregistered device corrupts its refcount, so the call is removed.
+ */
+static void __exit axmperf_exit(void)
+{
+	pr_warn("AXM55xx platform perf exit!\n");
+	platform_driver_unregister(&axmperf_driver);
+}
+
+module_init(axmperf_init);
+module_exit(axmperf_exit);
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-axxia/perf_event_platform.h b/arch/arm/mach-axxia/perf_event_platform.h
new file mode 100644
index 0000000..29698c5
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_platform.h
@@ -0,0 +1,10 @@
+#ifndef PERF_EVENT_PLATFORM_H
+#define PERF_EVENT_PLATFORM_H
+
+#define AXM55XX_R2 "1.1"
+
+/*
+ * Raw event-code layout for the AXM55xx platform PMU: one platform-wide
+ * base with disjoint sub-ranges per hardware block (VP, PCX, MEMC).
+ * Note the ranges are not contiguous; event_init must reject the gaps.
+ */
+#define AXM_55XX_PLATFORM_BASE 0x10000
+#define AXM_55XX_VP_BASE (AXM_55XX_PLATFORM_BASE + 0x00)
+#define AXM_55XX_VP_MAX (AXM_55XX_VP_BASE + 0x1fff)
+#define AXM_55XX_PCX_BASE (AXM_55XX_PLATFORM_BASE + 0x4000)
+#define AXM_55XX_PCX_MAX (AXM_55XX_PCX_BASE + 0x0fff)
+#define AXM_55XX_MEMC_BASE (AXM_55XX_PLATFORM_BASE + 0x8000)
+#define AXM_55XX_MEMC_MAX (AXM_55XX_MEMC_BASE + 0x0fff)
+#define AXM_55XX_PLATFORM_MAX (AXM_55XX_MEMC_MAX)
+
+#endif /* PERF_EVENT_PLATFORM_H */
diff --git a/arch/arm/mach-axxia/perf_event_vp.c b/arch/arm/mach-axxia/perf_event_vp.c
new file mode 100644
index 0000000..ebd35b0
--- /dev/null
+++ b/arch/arm/mach-axxia/perf_event_vp.c
@@ -0,0 +1,53 @@
+/*
+ * arch/arm/mach-axxia/perf_event_vp.c
+ * included from arch/arm/mach-axxia/perf_event_platform.c
+ *
+ * Support for the LSI Axxia boards based on ARM cores.
+ *
+ * Copyright (C) 2014 LSI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+
+/*
+ * Generic VP
+ */
+
+/* Placeholder: one-time VP block setup; no VP counters implemented yet. */
+static void vp_startup_init(void)
+{
+}
+
+/* Placeholder: VP per-event init hook; always reports success. */
+static uint32_t vp_pmu_event_init(uint32_t event, struct perf_event *pevent)
+{
+ return 0;
+}
+
+/* Placeholder: VP counter-start hook; no hardware is touched yet. */
+static uint32_t vp_pmu_event_add(uint32_t event, struct perf_event *pevent)
+{
+ return 0;
+}
+
+/* Placeholder: VP read hook; a zero delta is accumulated by the caller. */
+static uint32_t vp_pmu_event_read(uint32_t event, struct perf_event *pevent,
+ int flags)
+{
+ return 0;
+}
+
+/* Placeholder: VP counter-stop hook; returns a zero final count. */
+static uint32_t vp_pmu_event_del(uint32_t event, struct perf_event *pevent,
+ int flags)
+{
+ return 0;
+}
diff --git a/arch/arm/mach-axxia/rapidio.c b/arch/arm/mach-axxia/rapidio.c
new file mode 100644
index 0000000..3562375
--- /dev/null
+++ b/arch/arm/mach-axxia/rapidio.c
@@ -0,0 +1,112 @@
+/*
+ * linux/arch/arm/mach-axxia/rapidio.c
+ *
+ * Helper module for board specific RAPIDIO bus registration
+ *
+ * Copyright (C) 2013-2014 LSI Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/signal.h>
+
+#include <mach/ncr.h>
+#include <mach/rio.h>
+
+/**
+ * axxia_rapidio_board_init -
+ * Perform board-/controller-specific initialization to support
+ * use of RapidIO busses
+ *
+ * @dev: [IN] RIO platform device
+ * @dev_num: [IN] Which instance of SRIOC driver needs support
+ * @port_ndx: [OUT] Which port to use for the specified controller
+ *
+ * Returns 0 on success or an error code.
+ */
+
+int
+axxia_rapidio_board_init(struct platform_device *dev, int dev_num, int *port_ndx)
+{
+	/* Reset the RIO port id to zero for this device */
+	void __iomem *gpreg_base = ioremap(0x2010094000, 0x1000);
+	unsigned long reg = 0;
+	int rc = 0;
+
+	if (gpreg_base == NULL)
+		return -EFAULT;
+
+	/*
+	 * Use MMIO accessors: inl()/outl_p() are x86-style port-I/O
+	 * helpers and are not valid on an ioremap()ed region on ARM.
+	 */
+	reg = readl(gpreg_base + 0x60);
+	reg &= ~(0xf << (dev_num * 4));
+	writel(reg, gpreg_base + 0x60);
+
+	(*port_ndx) = 0;
+
+	/* Verify that this device is actually enabled */
+	if (NULL !=
+	    of_find_compatible_node(NULL, NULL, "lsi,axm5500-amarillo")) {
+		ncr_read(NCP_REGION_ID(0x115, 0), 0x23c, 4, &reg);
+
+		if ((reg & (1 << (21 + (dev_num * 4)))) == 0) {
+			dev_dbg(&dev->dev, "%s: SRIO%d link not ready\n",
+				dev->dev.of_node->full_name, dev_num);
+			/* Fall through so the mapping is released. */
+			rc = -ENXIO;
+		}
+	}
+
+	iounmap(gpreg_base);	/* original leaked this on the -ENXIO path */
+
+	return rc;
+}
+
+/**
+ * axxia_rio_fault -
+ * Intercept SRIO bus faults due to unimplemented register locations.
+ * Return 0 to keep 'reads' alive.
+ */
+
+/*
+ * Fault handler installed by axxia_rapidio_init() for asynchronous
+ * external aborts. Returning 0 reports the fault as handled so SRIO
+ * reads of unimplemented register locations complete instead of oopsing.
+ */
+static int
+axxia_rio_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+ /* unsigned long pc = instruction_pointer(regs); */
+ /* unsigned long instr = *(unsigned long *)pc; */
+ return 0;
+}
+
+/**
+ * axxia_rapidio_init -
+ * Perform board-specific initialization to support use of RapidIO busses
+ *
+ * Returns 0 on success or an error code.
+ */
+int __init
+axxia_rapidio_init(void)
+{
+ /*
+  * Hook ARM fault status 0x11 (asynchronous external abort) so that
+  * aborts triggered by SRIO accesses are absorbed by axxia_rio_fault()
+  * rather than crashing the kernel.
+  */
+ hook_fault_code(0x11, axxia_rio_fault, SIGBUS, 0,
+ "asynchronous external abort");
+
+ return 0;
+}
--
1.7.9.5
More information about the linux-yocto
mailing list