[linux-yocto] [PATCH v2 23/39] ARM: axxia: Add cluster and L2 power off in hotplug
Cristian Bercaru
cristian.bercaru at windriver.com
Thu May 21 02:40:48 PDT 2015
From: Magnus Karlsson <magnus.karlsson at intel.com>
Adds a choice to hotplug. There are two ways to power down the cpu,
either to a low power mode or completely off. If the power off mode
is selected, then a new option is available to power off the
L2 cache as well.
This code was originally written by Charlie Paul
<cpaul.windriver at gmail.com> and John Jacques <john.jacques at lsi.com>.
Only minor modifications performed by the author of this patch.
Signed-off-by: Charlie Paul <charlie.paul at windriver.com>
Signed-off-by: John Jacques <john.jacques at lsi.com>
Signed-off-by: Magnus Karlsson <magnus.karlsson at intel.com>
---
arch/arm/Kconfig | 37 ++
arch/arm/mach-axxia/axxia.c | 2 -
arch/arm/mach-axxia/axxia.h | 5 +
arch/arm/mach-axxia/ddr_retention.c | 3 +-
arch/arm/mach-axxia/hotplug.c | 226 +++++++--
arch/arm/mach-axxia/lsi_power_management.c | 753 +++++++++++-----------------
arch/arm/mach-axxia/lsi_power_management.h | 15 +-
arch/arm/mach-axxia/platsmp.c | 54 +-
8 files changed, 574 insertions(+), 521 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ee40abc..ed02265a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1504,6 +1504,8 @@ config NR_CPUS
depends on SMP
default "4"
+menu "Support for hot-pluggable CPUs"
+
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP && HOTPLUG
@@ -1511,6 +1513,41 @@ config HOTPLUG_CPU
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
+choice
+ prompt "CPU Power Down Mode"
+ default HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ help
+ This is used to select how the CPU is going to be powered down. If LOW POWER
+ is selected then the CPU enters a WFI state and waits for an interrupt to
+ wake up. If COMPLETE POWER DOWN is selected, the CPU power is turned off. The only
+ way to power on the CPU is to execute a command.
+
+config HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ bool "Power off the CPU"
+ help
+ This will power off the CPU completely. The irqs are migrated
+ to another CPU.
+
+config HOTPLUG_CPU_LOW_POWER
+ bool "Low Power CPU (wfi)"
+ help
+ This will put the CPU into a low-power wfi mode. When an interrupt
+ is received the CPU will power on again.
+
+endchoice
+
+config HOTPLUG_CPU_L2_POWER_DOWN
+ bool "Power Off L2 Cache"
+ depends on HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ default n if HOTPLUG_CPU_LOW_POWER
+ help
+ Select this if you want to power down the L2 cache when
+ all CPUS of a cluster have been powered off.
+
+endmenu
+
+
+
config ARM_PSCI
bool "Support for the ARM Power State Coordination Interface (PSCI)"
depends on CPU_V7
diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c
index a0b3532..16a65e9 100644
--- a/arch/arm/mach-axxia/axxia.c
+++ b/arch/arm/mach-axxia/axxia.c
@@ -63,7 +63,6 @@ static const char *axxia_dt_match[] __initconst = {
};
static void __iomem *base;
-static void __iomem *dickens;
static void set_l3_pstate(u32 newstate)
{
@@ -200,7 +199,6 @@ void __init axxia_dt_init(void)
{
base = ioremap(0x2010000000, 0x40000);
if (!of_find_compatible_node(NULL, NULL, "lsi,axm5500-sim")) {
- dickens = ioremap(0x2000000000, SZ_4M);
#ifdef CONFIG_KEXEC
kexec_reinit = flush_l3;
#endif
diff --git a/arch/arm/mach-axxia/axxia.h b/arch/arm/mach-axxia/axxia.h
index 000adc8..c46b76d 100644
--- a/arch/arm/mach-axxia/axxia.h
+++ b/arch/arm/mach-axxia/axxia.h
@@ -5,6 +5,11 @@ void axxia_ddr_retention_init(void);
void axxia_platform_cpu_die(unsigned int cpu);
int axxia_platform_cpu_kill(unsigned int cpu);
+extern void axxia_secondary_startup(void);
+
extern struct smp_operations axxia_smp_ops;
+extern void __iomem *syscon;
+extern void __iomem *dickens;
+
#endif
diff --git a/arch/arm/mach-axxia/ddr_retention.c b/arch/arm/mach-axxia/ddr_retention.c
index bc3f79a..512bf26 100644
--- a/arch/arm/mach-axxia/ddr_retention.c
+++ b/arch/arm/mach-axxia/ddr_retention.c
@@ -32,10 +32,10 @@
#include <asm/io.h>
#include <asm/cacheflush.h>
#include <mach/ncr.h>
+#include "axxia.h"
static void __iomem *nca;
static void __iomem *apb;
-static void __iomem *dickens;
static int ddr_retention_enabled;
enum {
@@ -342,7 +342,6 @@ axxia_ddr_retention_init(void)
} else {
apb = ioremap(0x2010000000, 0x80000);
nca = ioremap(0x002020100000ULL, 0x20000);
- dickens = ioremap(0x2000000000, 0x1000000);
ddr_retention_enabled = 1;
pr_info("DDR Retention Reset Initialized\n");
}
diff --git a/arch/arm/mach-axxia/hotplug.c b/arch/arm/mach-axxia/hotplug.c
index 358d916..61235fb 100644
--- a/arch/arm/mach-axxia/hotplug.c
+++ b/arch/arm/mach-axxia/hotplug.c
@@ -11,40 +11,144 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
+#include <linux/of_address.h>
+#include <linux/delay.h>
+#include <mach/axxia-gic.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/cp15.h>
#include "lsi_power_management.h"
-
extern volatile int pen_release;
-static inline void cpu_enter_lowpower_a15(void)
+static inline void pm_cpu_logical_shutdown(u32 cpu)
{
- unsigned int v;
+ u32 val;
+
+ asm volatile(
+ " mrc p15, 1, %0, c9, c0, 2\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
asm volatile(
" mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (v)
+ : "=&r" (val)
: "Ir" (CR_C)
: "cc");
+ /* Clear and invalidate all data in the L1 data cache */
flush_cache_all();
+ /* Switch the processor over to AMP mode out of SMP */
asm volatile(
- /*
- * Turn off coherency
- */
- " mrc p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (val)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+
+ wfi();
+
+}
+
+static inline void pm_L2_logical_shutdown(u32 cpu)
+{
+ u32 val;
+
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
" bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (0x40)
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (val)
+ : "Ir" (CR_C)
: "cc");
+
+ asm volatile(
+ /*
+ * Disable L2 prefetch
+ */
+ " mrc p15, 1, %0, c15, c0, 3\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 3\n"
+ : "=&r" (val)
+ : "Ir" (0x400)
+ : "cc");
+
+ asm volatile(
+ " mrc p15, 1, %0, c15, c0, 4\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 4\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
+
+ isb();
+ dsb();
+
+ flush_cache_all();
+
+ /* Turn the DBG Double Lock quiet */
+ asm volatile(
+ /*
+ * Turn Off the DBGOSDLR.DLK bit
+ */
+ " mrc p14, 0, %0, c1, c3, 4\n"
+ " orr %0, %0, %1\n"
+ " mcr p14, 0, %0, c1, c3, 4\n"
+ : "=&r" (val)
+ : "Ir" (0x1)
+ : "cc");
+
+ /* Switch the processor over to AMP mode out of SMP */
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (val)
+ : "Ir" (0x40)
+ : "cc");
+
+ isb();
+ dsb();
+
+ wfi();
+}
+
+#ifdef CONFIG_HOTPLUG_CPU_LOW_POWER
+static inline void cpu_enter_lowpower_a15(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C)
+ : "cc");
+
+ flush_cache_all();
+
+ asm volatile(
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (0x40)
+ : "cc");
+
isb();
dsb();
}
@@ -54,20 +158,68 @@ static inline void cpu_leave_lowpower(void)
unsigned int v;
asm volatile(
- "mrc p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
" orr %0, %0, %1\n"
" mcr p15, 0, %0, c1, c0, 0\n"
" mrc p15, 0, %0, c1, c0, 1\n"
" orr %0, %0, %2\n"
" mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (v)
- : "Ir" (CR_C), "Ir" (0x40)
- : "cc");}
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+ isb();
+ dsb();
+}
+
+static void __ref platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ int phys_cpu, cluster;
+
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ wfi();
+ /*
+ * Convert the "cpu" variable to be compatible with the
+ * ARM MPIDR register format (CLUSTERID and CPUID):
+ *
+ * Bits: |11 10 9 8|7 6 5 4 3 2|1 0
+ * | CLUSTER | Reserved |CPU
+ */
+ phys_cpu = cpu_logical_map(cpu);
+ cluster = (phys_cpu / 4) << 8;
+ phys_cpu = cluster + (phys_cpu % 4);
+
+ if (pen_release == phys_cpu) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+#endif
int axxia_platform_cpu_kill(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ get_cpu();
pm_cpu_shutdown(cpu);
+ put_cpu();
+#endif
return 1;
}
@@ -79,33 +231,49 @@ int axxia_platform_cpu_kill(unsigned int cpu)
void axxia_platform_cpu_die(unsigned int cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ bool last_cpu;
- pm_data pm_request;
- int rVal = 0;
- bool lastCpu;
-
- pm_request.cpu = cpu;
- pm_request.cluster = 0;
-
-
- lastCpu = pm_cpu_last_of_cluster(cpu);
- if (lastCpu)
- rVal = pm_cpul2_logical_die(&pm_request);
+ last_cpu = pm_cpu_last_of_cluster(cpu);
+ if (last_cpu)
+ pm_L2_logical_shutdown(cpu);
else
- rVal = pm_cpu_logical_die(&pm_request);
- if (rVal)
- pr_err("CPU %d failed to die\n", cpu);
+ pm_cpu_logical_shutdown(cpu);
for (;;)
wfi();
+#else /* CPU low power mode */
+
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower_a15();
+ pm_in_progress[cpu] = true;
+
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+#endif
+
}
int platform_cpu_disable(unsigned int cpu)
{
+
/*
* we don't allow CPU 0 to be shutdown (it is still too special
* e.g. clock tick interrupts)
*/
+
return cpu == 0 ? -EPERM : 0;
}
diff --git a/arch/arm/mach-axxia/lsi_power_management.c b/arch/arm/mach-axxia/lsi_power_management.c
index fc256f2..41e7159 100644
--- a/arch/arm/mach-axxia/lsi_power_management.c
+++ b/arch/arm/mach-axxia/lsi_power_management.c
@@ -18,21 +18,21 @@
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/delay.h>
+#include <linux/of_address.h>
#include <asm/exception.h>
#include <asm/cacheflush.h>
#include <asm/smp_plat.h>
#include <asm/cp15.h>
#include "axxia.h"
+#include <mach/axxia-gic.h>
#include "lsi_power_management.h"
#undef DEBUG_CPU_PM
-#define SYSCON_PHYS_ADDR 0x002010030000ULL
-#define DICKENS_PHYS_ADDR 0x2000000000
-
#define PM_WAIT_TIME (10000)
#define MAX_CLUSTER (4)
+#define IPI_IRQ_MASK (0xFFFF)
#define CHECK_BIT(var, pos) ((var) & (1 << (pos)))
@@ -50,24 +50,66 @@ PORESET_CLUSTER1,
PORESET_CLUSTER2,
PORESET_CLUSTER3 };
-static u32 pm_cpu_powered_down;
+static const u32 cluster_to_mask[MAX_CLUSTER] = {
+ IPI0_MASK,
+ IPI1_MASK,
+ IPI2_MASK,
+ IPI3_MASK
+};
+
+static const u32 ipi_register[MAX_IPI] = {
+ NCP_SYSCON_MASK_IPI0,
+ NCP_SYSCON_MASK_IPI1,
+ NCP_SYSCON_MASK_IPI2,
+ NCP_SYSCON_MASK_IPI3,
+ NCP_SYSCON_MASK_IPI4,
+ NCP_SYSCON_MASK_IPI5,
+ NCP_SYSCON_MASK_IPI6,
+ NCP_SYSCON_MASK_IPI7,
+ NCP_SYSCON_MASK_IPI8,
+ NCP_SYSCON_MASK_IPI9,
+ NCP_SYSCON_MASK_IPI10,
+ NCP_SYSCON_MASK_IPI11,
+ NCP_SYSCON_MASK_IPI12,
+ NCP_SYSCON_MASK_IPI13,
+ NCP_SYSCON_MASK_IPI14,
+ NCP_SYSCON_MASK_IPI15,
+ NCP_SYSCON_MASK_IPI16,
+ NCP_SYSCON_MASK_IPI17,
+ NCP_SYSCON_MASK_IPI18
+};
+
+enum pm_error_code {
+ PM_ERR_DICKENS_IOREMAP = 200,
+ PM_ERR_DICKENS_SNOOP_DOMAIN,
+ PM_ERR_FAILED_PWR_DWN_RAM,
+ PM_ERR_FAILED_STAGE_1,
+ PM_ERR_ACK1_FAIL,
+ PM_ERR_RAM_ACK_FAIL,
+ PM_ERR_FAIL_L2ACK,
+ PM_ERR_FAIL_L2HSRAM
+};
+
+u32 pm_cpu_powered_down;
/*======================= LOCAL FUNCTIONS ==============================*/
-static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data);
-static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data);
-static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit);
-static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg, u32 bit);
+static void pm_set_bits_syscon_register(u32 reg, u32 data);
+static void pm_or_bits_syscon_register(u32 reg, u32 data);
+static void pm_clear_bits_syscon_register(u32 reg, u32 data);
+static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit);
+static bool pm_wait_for_bit_clear_with_timeout(u32 reg, u32 bit);
static void pm_dickens_logical_shutdown(u32 cluster);
static int pm_dickens_logical_powerup(u32 cluster);
static int pm_cpu_physical_isolation_and_power_down(int cpu);
static void pm_L2_isolation_and_power_down(int cluster);
-static void __pm_cpu_shutdown(void *data);
static int pm_cpu_physical_connection_and_power_up(int cpu);
static int pm_L2_physical_connection_and_power_up(u32 cluster);
static int pm_L2_logical_powerup(u32 cluster, u32 cpu);
static bool pm_first_cpu_of_cluster(u32 cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
+
u32 count = 0;
switch (cpu) {
@@ -127,11 +169,14 @@ static bool pm_first_cpu_of_cluster(u32 cpu)
__LINE__);
break;
}
+#endif
return false;
}
bool pm_cpu_last_of_cluster(u32 cpu)
{
+#ifdef CONFIG_HOTPLUG_CPU_L2_POWER_DOWN
+
u32 count = 0;
switch (cpu) {
@@ -191,10 +236,16 @@ bool pm_cpu_last_of_cluster(u32 cpu)
__LINE__);
break;
}
+#endif
return false;
}
-static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data)
+static void pm_set_bits_syscon_register(u32 reg, u32 data)
+{
+ writel(data, syscon + reg);
+}
+
+static void pm_or_bits_syscon_register(u32 reg, u32 data)
{
u32 tmp;
@@ -203,7 +254,8 @@ static void pm_set_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data)
writel(tmp, syscon + reg);
}
-static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 data)
+
+static void pm_clear_bits_syscon_register(u32 reg, u32 data)
{
u32 tmp;
@@ -212,7 +264,7 @@ static void pm_clear_bits_syscon_register(void __iomem *syscon, u32 reg, u32 dat
writel(tmp, syscon + reg);
}
-static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit)
+static bool pm_test_for_bit_with_timeout(u32 reg, u32 bit)
{
u32 tmp = 0;
@@ -231,8 +283,7 @@ static bool pm_test_for_bit_with_timeout(void __iomem *syscon, u32 reg, u32 bit)
return true;
}
-static bool pm_wait_for_bit_clear_with_timeout(void __iomem *syscon, u32 reg,
- u32 bit)
+static bool pm_wait_for_bit_clear_with_timeout(u32 reg, u32 bit)
{
u32 cnt = 0;
u32 tmp = 0;
@@ -257,13 +308,6 @@ static void pm_dickens_logical_shutdown(u32 cluster)
u32 bit;
u32 bit_pos;
int retries;
- void __iomem *dickens;
-
- dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
- if (dickens == NULL) {
- pr_err("DICKENS: Failed to map the dickens registers\n");
- return;
- }
bit = (0x01 << cluster_to_node[cluster]);
bit_pos = cluster_to_node[cluster];
@@ -284,7 +328,7 @@ static void pm_dickens_logical_shutdown(u32 cluster)
if (0 == retries) {
pr_err("DICKENS: Failed to clear the SNOOP main control. LOOP:%d reg: 0x%x\n", i, status);
- goto dickens_power_down;
+ return;
}
@@ -304,12 +348,9 @@ static void pm_dickens_logical_shutdown(u32 cluster)
if (0 == retries) {
pr_err("DICKENS: failed to set DOMAIN OFFSET Reg=0x%x\n", status);
- goto dickens_power_down;
+ return;
}
-
-dickens_power_down:
- iounmap(dickens);
}
static int pm_dickens_logical_powerup(u32 cluster)
@@ -321,12 +362,6 @@ static int pm_dickens_logical_powerup(u32 cluster)
int retries;
int rval = 0;
- void __iomem *dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
-
- if (dickens == NULL) {
- pr_err("Failed to map dickens registers\n");
- return -EINVAL;
- }
bit = (0x01 << cluster_to_node[cluster]);
bit_pos = cluster_to_node[cluster];
@@ -347,8 +382,7 @@ static int pm_dickens_logical_powerup(u32 cluster)
if (0 == retries) {
pr_err("DICKENS: Failed on the SNOOP DONAIN\n");
- rval = -EINVAL;
- goto dickens_power_up;
+ return -PM_ERR_DICKENS_SNOOP_DOMAIN;
}
}
@@ -367,213 +401,129 @@ static int pm_dickens_logical_powerup(u32 cluster)
} while ((0 < --retries) && !CHECK_BIT(status, bit_pos));
if (0 == retries) {
- pr_err("DICKENS: Failed on the SNOOP DONAIN\n");
- rval = -EINVAL;
- goto dickens_power_up;
+ pr_err("DICKENS: Failed on the SNOOP DONAIN CTL SET\n");
+ return -PM_ERR_DICKENS_SNOOP_DOMAIN;
}
-dickens_power_up:
- iounmap(dickens);
-
return rval;
}
-static void __pm_cpu_shutdown(void *data)
+static void pm_disable_ipi_interrupts(u32 cpu)
+{
+ pm_clear_bits_syscon_register(ipi_register[cpu], IPI_IRQ_MASK);
+}
+
+static void pm_enable_ipi_interrupts(u32 cpu)
+{
+
+ u32 i;
+ u32 powered_on_cpu = (~(pm_cpu_powered_down) & IPI_IRQ_MASK);
+
+ pm_set_bits_syscon_register(ipi_register[cpu], powered_on_cpu);
+
+ for (i = 0; i < MAX_CPUS; i++) {
+ if ((1 << i) & powered_on_cpu)
+ pm_or_bits_syscon_register(ipi_register[i], (1 << cpu));
+ }
+}
+
+bool pm_cpu_active(u32 cpu)
+{
+
+ bool success = false;
+ u32 reg;
+
+ reg = readl(syscon + NCP_SYSCON_PWR_QACTIVE);
+ if (reg & (1 << cpu))
+ success = true;
+
+ return success;
+
+}
+
+void pm_cpu_shutdown(u32 cpu)
{
- pm_data *pm_request = (pm_data *)data;
- void __iomem *syscon;
bool success;
- u32 cluster_mask = (0x01 << pm_request->cluster);
+ u32 reqcpu = cpu_logical_map(cpu);
+ u32 cluster = reqcpu / CORES_PER_CLUSTER;
+ u32 cluster_mask = (0x01 << cluster);
bool last_cpu;
int rval = 0;
+ /* Check to see if the cpu is powered up */
+ if (pm_cpu_powered_down & (1 << reqcpu)) {
+ pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__);
+ return;
+ }
+
/*
* Is this the last cpu of a cluster then turn off the L2 cache
* along with the CPU.
*/
- last_cpu = pm_cpu_last_of_cluster(pm_request->cpu);
+ last_cpu = pm_cpu_last_of_cluster(reqcpu);
if (last_cpu) {
+ /* Disable all the interrupts to the cluster gic */
+ pm_or_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
+
/* Remove the cluster from the Dickens coherency domain */
- pm_dickens_logical_shutdown(pm_request->cluster);
+ pm_dickens_logical_shutdown(cluster);
/* Power down the cpu */
- pm_cpu_physical_isolation_and_power_down(pm_request->cpu);
-
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return;
-
-#if 0
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_TS, cluster_mask);
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_TS, pm_request->cluster);
- if (!success) {
- pr_err(
- "Failed to keep other cluster TS going on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
- }
+ pm_cpu_physical_isolation_and_power_down(reqcpu);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_ATB, cluster_mask);
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_ATB, pm_request->cluster);
- if (!success) {
- pr_err(
- "Failed to keep other cluster ATB going on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
- }
-
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_APB, cluster_mask);
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_APB, pm_request->cluster);
- if (!success) {
- pr_err(
- "Failed to keep other cluster APB going on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
- }
-#endif
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask);
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_CACTIVE_CNT, pm_request->cluster);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, cluster_mask);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_CACTIVE_CNT, cluster);
if (!success) {
pr_err(
"Failed to keep other cluster count going on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
}
/* Turn off the ACE */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, cluster_mask);
/* Wait for ACE to complete power off */
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NACEPWRDNACK, pm_request->cluster);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NACEPWRDNACK, cluster);
if (!success) {
pr_err("Failed to power off ACE on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
}
/* Isolate the cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, cluster_mask);
/* Wait for WFI L2 to go to standby */
- success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFIL2, pm_request->cluster);
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_STANDBYWFIL2, cluster);
if (!success) {
pr_err("Failed to enter L2 WFI on cluster %d: %s-%d\n",
- pm_request->cluster, __FILE__, __LINE__);
- iounmap(syscon);
- return;
+ cluster, __FILE__, __LINE__);
+ goto pm_shutdown_exit;
}
- iounmap(syscon);
-
/* Power off the L2 */
- pm_L2_isolation_and_power_down(pm_request->cluster);
+ pm_L2_isolation_and_power_down(cluster);
if (rval == 0) {
- pr_info("CPU %d is powered down with cluster: %d\n", pm_request->cpu, pm_request->cluster);
- pm_cpu_powered_down |= (1 << pm_request->cpu);
+ pr_info("CPU %d is powered down with cluster: %d\n", reqcpu, cluster);
+ pm_cpu_powered_down |= (1 << reqcpu);
} else
- pr_err("CPU %d failed to power down\n", pm_request->cpu);
+ pr_err("CPU %d failed to power down\n", reqcpu);
} else {
- rval = pm_cpu_physical_isolation_and_power_down(pm_request->cpu);
+ rval = pm_cpu_physical_isolation_and_power_down(reqcpu);
if (rval == 0)
- pm_cpu_powered_down |= (1 << pm_request->cpu);
+ pm_cpu_powered_down |= (1 << reqcpu);
else
- pr_err("CPU %d failed to power down\n", pm_request->cpu);
+ pr_err("CPU %d failed to power down\n", reqcpu);
}
-}
-
-int pm_cpu_logical_die(pm_data *pm_request)
-{
- void __iomem *syscon;
- bool success;
-
- smp_call_function_single(pm_request->cpu, pm_cpu_logical_shutdown, (void *)pm_request, 1);
-
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
- /* Wait for the cpu to enter wfi */
- success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu);
- if (!success) {
- pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n",
- pm_request->cpu, __FILE__, __LINE__);
- iounmap(syscon);
- return -EINVAL;
- }
-
- iounmap(syscon);
- return 0;
-}
-
-int pm_cpul2_logical_die(pm_data *pm_request)
-{
- void __iomem *syscon;
- bool success;
-
- smp_call_function_single(pm_request->cpu, pm_L2_logical_shutdown, (void *)pm_request, 1);
-
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
- /* Wait for the cpu to enter wfi */
- success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_STANDBYWFI, pm_request->cpu);
- if (!success) {
- pr_err("Failed to enter WFI mode on cpu %d: %s-%d\n",
- pm_request->cpu, __FILE__, __LINE__);
- iounmap(syscon);
- return -EINVAL;
- }
-
- iounmap(syscon);
- return 0;
-}
-
-void pm_cpu_shutdown(u32 cpu)
-{
-
- pm_data pm_request;
-
- u32 pcpu = cpu_logical_map(smp_processor_id());
- u32 rcpu = cpumask_any_and(cpu_present_mask, cpu_online_mask);
- u32 reqcpu = cpu_logical_map(cpu);
-
- /* Check to see if the cpu is powered up */
- if (pm_cpu_powered_down & (1 << cpu)) {
- pr_err("CPU %d is already powered off - %s:%d\n", cpu, __FILE__, __LINE__);
- return;
- }
- /*
- * Is this the last cpu to be powered off, then don't
- * allow the power to be shut off.
- */
- if (cpu == 0) {
- pr_err("Cannot turn off cpu 0 - %s:%d\n", __FILE__, __LINE__);
- return;
- }
-
- /*
- * Is this process on the requested cpu to power down
- * then send it to another cpu for processing
- */
- pm_request.cpu = cpu;
- pm_request.cluster = reqcpu / CORES_PER_CLUSTER;
-
- if (pcpu == cpu)
- smp_call_function_single(rcpu, __pm_cpu_shutdown, (void *)&pm_request, 0);
- else
- __pm_cpu_shutdown(&pm_request);
+pm_shutdown_exit:
+ return;
}
@@ -582,25 +532,12 @@ int pm_cpu_powerup(u32 cpu)
bool first_cpu;
int rval = 0;
- void __iomem *syscon = NULL;
u32 cpu_mask = (0x01 << cpu);
u32 reqcpu = cpu_logical_map(cpu);
u32 cluster = reqcpu / CORES_PER_CLUSTER;
- /* Hold the CPU in reset */
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
- /*
- * The key value has to be written before the CPU RST can be written.
- */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
-
- /* Hold the CPU in reset */
- iounmap(syscon);
+ u32 cluster_mask = (0x01 << cluster);
/*
* Is this the first cpu of a cluster to come back on?
@@ -609,13 +546,21 @@ int pm_cpu_powerup(u32 cpu)
first_cpu = pm_first_cpu_of_cluster(cpu);
if (first_cpu) {
-
rval = pm_L2_logical_powerup(cluster, cpu);
- if (rval) {
+ if (rval)
+ pr_info("CPU %d is powered up with cluster: %d\n", reqcpu, cluster);
+ else {
pr_err("CPU: Failed the logical L2 power up\n");
- return rval;
+ goto pm_power_up;
}
cluster_power_up[cluster] = true;
+ pm_clear_bits_syscon_register(NCP_SYSCON_GIC_DISABLE, cluster_mask);
+
+
+ } else {
+ /* Set the CPU into reset */
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
}
@@ -628,33 +573,21 @@ int pm_cpu_powerup(u32 cpu)
goto pm_power_up;
}
- udelay(16);
-
- /* Clear the CPU from reset and let it go */
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
/*
* The key value must be written before the CPU RST can be written.
*/
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
-
- /*
- * The key value must be written before HOLD CPU can be written.
- */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_CPU, cpu_mask);
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
/*
* Clear the powered down mask
*/
pm_cpu_powered_down &= ~(1 << cpu);
+ /* Enable the CPU IPI */
+ pm_enable_ipi_interrupts(cpu);
pm_power_up:
- iounmap(syscon);
return rval;
}
@@ -663,240 +596,172 @@ unsigned long pm_get_powered_down_cpu(void)
return pm_cpu_powered_down;
}
-void pm_cpu_logical_shutdown(void *data)
+inline void pm_cpu_logical_powerup(void)
{
- u32 val;
-
- asm volatile(
- " mrc p15, 1, %0, c9, c0, 2\n"
- : "=&r" (val)
- : "Ir" (0x1)
- : "cc");
+ unsigned int v;
asm volatile(
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (val)
- : "Ir" (CR_C)
- : "cc");
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (CR_I)
+ : "cc");
- /* Clear and invalidate all date from L1 data cache */
- flush_cache_all();
+ /*
+ * Iniitalize the ACTLR2 register (all cores).
+ */
- /* Switch the processor over to AMP mode out of SMP */
asm volatile(
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (val)
- : "Ir" (0x40)
+ " mrc p15, 1, %0, c15, c0, 4\n"
+ " bic %0, %0, %1\n"
+ " mcr p15, 1, %0, c15, c0, 4\n"
+ : "=&r" (v)
+ : "Ir" (0x1)
: "cc");
isb();
dsb();
-
- wfi();
-
- return;
-
}
-void pm_cpu_logical_powerup(void)
+inline void pm_cluster_logical_powerup(void)
{
unsigned int v;
+ /*
+ * Initialize the L2CTLR register (primary core in each cluster).
+ */
asm volatile(
- " mrc p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 1, %0, c9, c0, 2\n"
" orr %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 0\n"
" orr %0, %0, %2\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- " mrc p15, 0, %0, c1, c0, 1\n"
- " orr %0, %0, %3\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
+ " mcr p15, 1, %0, c9, c0, 2"
: "=&r" (v)
- : "Ir" (CR_C), "Ir" (CR_I), "Ir" (0x40)
+ : "Ir" (0x01), "Ir" (0x1 << 21)
: "cc");
- asm volatile(
- " mrc p15, 1, %0, c9, c0, 2\n"
- : "=&r" (v)
- : "Ir" (0x1)
- : "cc");
+ isb();
+ dsb();
+ /*
+ * Initialize the L2ACTLR register (primary core in each cluster).
+ */
+ asm volatile(
+ " mrc p15, 1, r0, c15, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " orr %0, %0, %2\n"
+ " orr %0, %0, %3\n"
+ " orr %0, %0, %4\n"
+ " orr %0, %0, %5\n"
+ " mcr p15, 1, %0, c15, c0, 0"
+ : "=&r" (v)
+ : "Ir" (0x1 << 3), "Ir" (0x1 << 7), "Ir" (0x1 << 12), "Ir" (0x1 << 13), "Ir" (0x1 << 14)
+ : "cc");
+ isb();
+ dsb();
}
static int pm_cpu_physical_isolation_and_power_down(int cpu)
{
- void __iomem *syscon;
int rval = 0;
bool success;
u32 mask = (0x01 << cpu);
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
+ /* Disable the CPU IPI */
+ pm_disable_ipi_interrupts(cpu);
/* Initiate power down of the CPU's HS Rams */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
/* Wait until the RAM power down is complete */
- success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
if (!success) {
- rval = -EINVAL;
+ rval = -PM_ERR_FAILED_PWR_DWN_RAM;
pr_err("CPU: Failed to power down CPU RAM\n");
goto power_down_cleanup;
}
/* Activate the CPU's isolation clamps */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
/* Initiate power down of the CPU logic */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
- udelay(10);
+ udelay(16);
/* Continue power down of the CPU logic */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
- success = pm_test_for_bit_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+ success = pm_test_for_bit_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
if (!success) {
- rval = -EINVAL;
+ rval = -PM_ERR_FAILED_STAGE_1;
pr_err("CPU: Failed to power down stage 1 cpu\n");
goto power_down_cleanup;
}
power_down_cleanup:
- iounmap(syscon);
return rval;
}
static int pm_cpu_physical_connection_and_power_up(int cpu)
{
int rval = 0;
- void __iomem *syscon;
+
bool success;
u32 mask = (0x01 << cpu);
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
/* Initiate power up of the CPU */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG1, mask);
/* Wait until CPU logic power is compete */
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPUSTG1_ACK, cpu);
if (!success) {
- rval = -EINVAL;
+ rval = -PM_ERR_ACK1_FAIL;
pr_err("CPU: Failed to get ACK from power down stage 1\n");
goto power_up_cleanup;
}
/* Continue stage 2 power up of the CPU*/
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPUSTG2, mask);
- udelay(10);
+ udelay(16);
/* Initiate power up of HS Rams */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPCPURAM, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPCPURAM, mask);
/* Wait until the RAM power up is complete */
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPCPURAM_ACK, cpu);
if (!success) {
- rval = -EINVAL;
+ rval = -PM_ERR_RAM_ACK_FAIL;
pr_err("CPU: Failed to get ACK of power power up\n");
goto power_up_cleanup;
}
/* Release the CPU's isolation clamps */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATECPU, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATECPU, mask);
+
+ udelay(16);
power_up_cleanup:
- iounmap(syscon);
return rval;
}
/*========================================== L2 FUNCTIONS ========================================*/
-void pm_L2_logical_shutdown(void *data)
-{
- u32 val;
-
-
- asm volatile(
- " mrc p15, 0, %0, c1, c0, 0\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 0\n"
- : "=&r" (val)
- : "Ir" (CR_C)
- : "cc");
-
-
- asm volatile(
- /*
- * Disable L2 prefetch
- */
- " mrc p15, 1, %0, c15, c0, 3\n"
- " orr %0, %0, %1\n"
- " mcr p15, 1, %0, c15, c0, 3\n"
- : "=&r" (val)
- : "Ir" (0x400)
- : "cc");
-
- isb();
- dsb();
-
- /* Clear and invalidate all L1 and L2 data cache */
- flush_cache_all();
-
-
- /* Turn the DBG Double Lock quiet */
- asm volatile(
- /*
- * Turn Off the DBGOSDLR.DLK bit
- */
- " mrc p14, 0, %0, c1, c3, 4\n"
- " orr %0, %0, %1\n"
- " mcr p14, 0, %0, c1, c3, 4\n"
- : "=&r" (val)
- : "Ir" (0x1)
- : "cc");
-
- /* Switch the processor over to AMP mode out of SMP */
- asm volatile(
- " mrc p15, 0, %0, c1, c0, 1\n"
- " bic %0, %0, %1\n"
- " mcr p15, 0, %0, c1, c0, 1\n"
- : "=&r" (val)
- : "Ir" (0x40)
- : "cc");
-
- isb();
- dsb();
-
- wfi();
-}
-
static void pm_L2_isolation_and_power_down(int cluster)
{
- void __iomem *syscon;
u32 mask = (0x1 << cluster);
-
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return;
-
/* Enable the chip select for the cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
/* Disable the hsram */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
switch (cluster) {
case (0):
@@ -917,11 +782,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
@@ -945,11 +810,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -972,11 +837,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -999,11 +864,11 @@ static void pm_L2_isolation_and_power_down(int cluster)
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -1013,51 +878,45 @@ static void pm_L2_isolation_and_power_down(int cluster)
}
/* Power down stage 2 */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
/* Power down stage 1 */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
-
- iounmap(syscon);
}
static int pm_L2_physical_connection_and_power_up(u32 cluster)
{
- void __iomem *syscon;
+
bool success;
u32 mask = (0x1 << cluster);
int rval = 0;
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
/* Power up stage 1 */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG1, mask);
/* Wait for the stage 1 power up to complete */
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2LGCSTG1_ACK, cluster);
if (!success) {
pr_err("CPU: Failed to ack the L2 Stage 1 Power up\n");
- rval = -EINVAL;
+ rval = -PM_ERR_FAIL_L2ACK;
goto power_up_l2_cleanup;
}
/* Power on stage 2 */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2LGCSTG2, mask);
/* Set the chip select */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
- /* Power up the snoop ramram */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
+ /* Power up the snoop ram */
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL2HSRAM, mask);
/* Wait for the stage 1 power up to complete */
- success = pm_wait_for_bit_clear_with_timeout(syscon, NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster);
+ success = pm_wait_for_bit_clear_with_timeout(NCP_SYSCON_PWR_NPWRUPL2HSRAM_ACK, cluster);
if (!success) {
pr_err("CPU: failed to get the HSRAM power up ACK\n");
- rval = -EINVAL;
+ rval = -PM_ERR_FAIL_L2HSRAM;
goto power_up_l2_cleanup;
}
@@ -1080,11 +939,11 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
@@ -1095,7 +954,7 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
pm_set_bits_syscon_register(syscon,
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK0_MASK);
- udelay(20);
+ udelay(20);
pm_set_bits_syscon_register(syscon,
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_BANK1_LS_MASK);
pm_set_bits_syscon_register(syscon,
@@ -1108,11 +967,11 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL21RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -1135,11 +994,11 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL22RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -1162,11 +1021,11 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_BANK3_MASK);
udelay(20);
#else
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM2, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM1, RAM_ALL_MASK);
udelay(20);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
+ pm_set_bits_syscon_register(NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0, RAM_ALL_MASK);
udelay(20);
#endif
break;
@@ -1176,86 +1035,82 @@ static int pm_L2_physical_connection_and_power_up(u32 cluster)
}
/* Clear the chip select */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_CHIPSELECTEN, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_CHIPSELECTEN, mask);
/* Release the isolation clamps */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ISOLATEL2MISC, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ISOLATEL2MISC, mask);
/* Turn the ACE bridge power on*/
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACEPWRDNRQ, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACEPWRDNRQ, mask);
power_up_l2_cleanup:
-
- iounmap(syscon);
-
return rval;
}
static int pm_L2_logical_powerup(u32 cluster, u32 cpu)
{
- void __iomem *syscon;
u32 mask = (0x1 << cluster);
- u32 cpu_mask = (0x1 << cpu);
int rval = 0;
+ u32 cluster_mask;
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
+ if (cluster == 0)
+ cluster_mask = 0xe;
+ else
+ cluster_mask = 0xf << (cluster * 4);
/* put the cluster into a cpu hold */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS,
+ pm_or_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
cluster_to_poreset[cluster]);
/*
- * Write the key so the reset cpu register can be written to.
+ * The key value has to be written before the CPU RST can be written.
*/
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWRUP_CPU_RST, cpu_mask);
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWRUP_CPU_RST, cluster_mask);
/* Hold the chip debug cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask);
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_or_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
/* Hold the L2 cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask);
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_or_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
- iounmap(syscon);
/* Cluster physical power up */
rval = pm_L2_physical_connection_and_power_up(cluster);
+ if (rval)
+ goto exit_pm_L2_logical_powerup;
udelay(16);
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return -EINVAL;
-
/* take the cluster out of a cpu hold */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_RESET_AXIS,
+ pm_clear_bits_syscon_register(NCP_SYSCON_RESET_AXIS,
cluster_to_poreset[cluster]);
udelay(64);
/* Enable the system counter */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_PWR_CSYSREQ_CNT, mask);
+ pm_or_bits_syscon_register(NCP_SYSCON_PWR_CSYSREQ_CNT, mask);
/* Release the L2 cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_L2, mask);
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_L2, mask);
/* Release the chip debug cluster */
- pm_set_bits_syscon_register(syscon, NCP_SYSCON_KEY, VALID_KEY_VALUE);
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_HOLD_DBG, mask);
-
+ pm_set_bits_syscon_register(NCP_SYSCON_KEY, VALID_KEY_VALUE);
+ pm_clear_bits_syscon_register(NCP_SYSCON_HOLD_DBG, mask);
+ /* Power up the dickens */
rval = pm_dickens_logical_powerup(cluster);
+ if (rval)
+ goto exit_pm_L2_logical_powerup;
/* start L2 */
- pm_clear_bits_syscon_register(syscon, NCP_SYSCON_PWR_ACINACTM, mask);
+ pm_clear_bits_syscon_register(NCP_SYSCON_PWR_ACINACTM, mask);
- iounmap(syscon);
+exit_pm_L2_logical_powerup:
return rval;
@@ -1267,12 +1122,6 @@ void pm_debug_read_pwr_registers(void)
{
u32 reg;
- void __iomem *syscon;
-
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return;
-
reg = readl(syscon + 0x1400);
pr_err("NCP_SYSCON_PWR_CLKEN: 0x%x\n", reg);
reg = readl(syscon + NCP_SYSCON_PWR_ACINACTM);
@@ -1441,7 +1290,6 @@ void pm_debug_read_pwr_registers(void)
#endif
- iounmap(syscon);
}
@@ -1449,11 +1297,7 @@ void pm_dump_L2_registers(void)
{
u32 reg;
- void __iomem *syscon;
- syscon = ioremap(SYSCON_PHYS_ADDR, SZ_64K);
- if (WARN_ON(!syscon))
- return;
reg = readl(syscon + 0x1580);
pr_err("NCP_SYSCON_PWR_PWRUPL20RAM_PWRUPL2RAM2: 0x%x\n", reg);
reg = readl(syscon + 0x1584);
@@ -1479,23 +1323,15 @@ void pm_dump_L2_registers(void)
reg = readl(syscon + 0x15ac);
pr_err("NCP_SYSCON_PWR_PWRUPL23RAM_PWRUPL2RAM0: 0x%x\n", reg);
- iounmap(syscon);
-}
+}
void pm_dump_dickens(void)
{
- void __iomem *dickens;
u32 status;
u32 i;
- dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
- if (dickens == NULL) {
- pr_err("DICKENS: Failed to map the dickens registers\n");
- return;
- }
-
for (i = 0; i < DKN_HNF_TOTAL_NODES; ++i) {
status = readl(
dickens + (0x10000 * (DKN_HNF_NODE_ID + i))
@@ -1509,11 +1345,6 @@ void pm_dump_dickens(void)
+ DKN_MN_DVM_DOMAIN_CTL);
pr_err("DKN_MN_DVM_DOMAIN_CTL: 0x%x\n", status);
-
-
- iounmap(dickens);
-
-
}
#endif
diff --git a/arch/arm/mach-axxia/lsi_power_management.h b/arch/arm/mach-axxia/lsi_power_management.h
index 4cb6d1f..ef70af3 100644
--- a/arch/arm/mach-axxia/lsi_power_management.h
+++ b/arch/arm/mach-axxia/lsi_power_management.h
@@ -150,11 +150,19 @@
#define PORESET_CLUSTER2 (0x40000)
#define PORESET_CLUSTER3 (0x80000)
+/* IPI Masks */
+#define IPI0_MASK (0x1111)
+#define IPI1_MASK (0x2222)
+#define IPI2_MASK (0x4444)
+#define IPI3_MASK (0x8888)
+
/* SYSCON KEY Value */
#define VALID_KEY_VALUE (0xAB)
#define MAX_NUM_CLUSTERS (4)
#define CORES_PER_CLUSTER (4)
+#define MAX_IPI (19)
+#define MAX_CPUS (MAX_NUM_CLUSTERS * CORES_PER_CLUSTER)
typedef struct {
u32 cpu;
@@ -166,18 +174,19 @@ void pm_cpu_shutdown(u32 cpu);
int pm_cpu_powerup(u32 cpu);
void pm_debug_read_pwr_registers(void);
void pm_dump_L2_registers(void);
-void pm_cpu_logical_shutdown(void *data);
int pm_cpu_logical_die(pm_data *pm_request);
int pm_cpul2_logical_die(pm_data *pm_request);
unsigned long pm_get_powered_down_cpu(void);
bool pm_cpu_last_of_cluster(u32 cpu);
-void pm_L2_logical_shutdown(void *data);
void pm_dump_dickens(void);
void pm_init_cpu(u32 cpu);
void pm_cpu_logical_powerup(void);
-
+void pm_cluster_logical_powerup(void);
+bool pm_cpu_active(u32 cpu);
+void pm_init_syscon(void);
extern bool pm_in_progress[];
extern bool cluster_power_up[];
+extern u32 pm_cpu_powered_down;
#endif /* LSI_POWER_MANAGEMENT_H_ */
diff --git a/arch/arm/mach-axxia/platsmp.c b/arch/arm/mach-axxia/platsmp.c
index fb387f9..542efd0 100644
--- a/arch/arm/mach-axxia/platsmp.c
+++ b/arch/arm/mach-axxia/platsmp.c
@@ -26,13 +26,15 @@
#include "lsi_power_management.h"
#include <mach/axxia-gic.h>
-extern void axxia_secondary_startup(void);
-
#define SYSCON_PHYS_ADDR 0x002010030000ULL
+#define DICKENS_PHYS_ADDR 0x2000000000
static int __cpuinitdata wfe_fixup;
static int wfe_available;
+void __iomem *syscon;
+void __iomem *dickens;
+
inline void
__axxia_arch_wfe(void)
{
@@ -58,7 +60,7 @@ static void __init check_fixup_sev(void __iomem *syscon)
pr_info("axxia: Cross-cluster SEV fixup: %s\n", wfe_fixup ? "yes" : "no");
}
-static void __cpuinit do_fixup_sev(void)
+static void do_fixup_sev(void)
{
u32 tmp;
@@ -87,26 +89,37 @@ static void __cpuinit write_pen_release(int val)
static DEFINE_RAW_SPINLOCK(boot_lock);
-void __cpuinit axxia_secondary_init(unsigned int cpu)
+void axxia_secondary_init(unsigned int cpu)
{
- int phys_cpu, cluster;
+ int phys_cpu;
+ int phys_cluster;
phys_cpu = cpu_logical_map(cpu);
- cluster = (phys_cpu / 4) << 8;
+ phys_cluster = phys_cpu / 4;
/*
* Only execute this when powering up a cpu for hotplug.
*/
- if (!pm_in_progress[cpu]) {
+ if (!pm_in_progress[phys_cpu]) {
/* Fixup for cross-cluster SEV */
do_fixup_sev();
axxia_gic_secondary_init();
} else {
- axxia_gic_secondary_init();
+
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ if (cluster_power_up[phys_cluster])
+ pm_cluster_logical_powerup();
pm_cpu_logical_powerup();
- pm_in_progress[cpu] = false;
- cluster_power_up[cluster] = false;
+#endif
+ get_cpu();
+ axxia_gic_secondary_init();
+ put_cpu();
+
+#ifdef CONFIG_HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ cluster_power_up[phys_cluster] = false;
+ pm_in_progress[phys_cpu] = false;
+#endif
}
/*
@@ -122,13 +135,12 @@ void __cpuinit axxia_secondary_init(unsigned int cpu)
_raw_spin_unlock(&boot_lock);
}
-int __cpuinit axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
+int axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
int phys_cpu, cluster;
unsigned long timeout;
unsigned long powered_down_cpu;
- int rVal = 0;
/*
@@ -142,14 +154,8 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
powered_down_cpu = pm_get_powered_down_cpu();
if (powered_down_cpu & (1 << phys_cpu)) {
- pm_in_progress[cpu] = true;
-
- rVal = pm_cpu_powerup(phys_cpu);
- if (rVal) {
- _raw_spin_unlock(&boot_lock);
- return rVal;
- }
-
+ pm_in_progress[phys_cpu] = true;
+ pm_cpu_powerup(phys_cpu);
}
/*
@@ -170,7 +176,6 @@ int __cpuinit axxia_boot_secondary(unsigned int cpu, struct task_struct *idle)
* Bits: |11 10 9 8|7 6 5 4 3 2|1 0
* | CLUSTER | Reserved |CPU
*/
- phys_cpu = cpu_logical_map(cpu);
cluster = (phys_cpu / 4) << 8;
phys_cpu = cluster + (phys_cpu % 4);
@@ -218,7 +223,6 @@ static __init struct device_node *get_cpu_node(int cpu)
static void __init axxia_smp_prepare_cpus(unsigned int max_cpus)
{
- void __iomem *syscon;
int cpu_count = 0;
int cpu;
@@ -226,6 +230,10 @@ static void __init axxia_smp_prepare_cpus(unsigned int max_cpus)
if (WARN_ON(!syscon))
return;
+ dickens = ioremap(DICKENS_PHYS_ADDR, SZ_4M);
+ if (WARN_ON(!dickens))
+ return;
+
check_fixup_sev(syscon);
do_fixup_sev();
@@ -290,8 +298,6 @@ static void __init axxia_smp_prepare_cpus(unsigned int max_cpus)
iounmap(release_virt);
}
}
-
- iounmap(syscon);
}
struct smp_operations axxia_smp_ops __initdata = {
--
1.7.9.5
More information about the linux-yocto
mailing list