[linux-yocto] [PATCH v2 22/39] ARM: irq: Return error when set_affinity is called from hotplug
Cristian Bercaru
cristian.bercaru at windriver.com
Thu May 21 02:40:47 PDT 2015
From: Magnus Karlsson <magnus.karlsson at intel.com>
Set_affinity now returns an error when called from the hotplug
path. This restriction remains in place until we implement a way to
migrate interrupts between clusters during a hotplug operation. Note
that set_affinity from user space works as before.
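[Editor's note: the distinction the change keys off is the 'force'
argument of the irq_chip irq_set_affinity callback. In kernels of
this era the ARM CPU-hotplug migration path (migrate_one_irq())
invokes the callback with force set to true, while affinity writes
from user space (e.g. to /proc/irq/N/smp_affinity) arrive with force
false. A minimal sketch of that convention follows; it is
illustrative only, and example_set_affinity is a made-up name, not
the driver code below:

    #include <linux/irq.h>
    #include <linux/printk.h>
    #include <linux/errno.h>

    static int example_set_affinity(struct irq_data *d,
                                    const struct cpumask *mask_val,
                                    bool force)
    {
            /* Hotplug migration path: cross-cluster moves unsupported. */
            if (force) {
                    pr_warn("Set affinity for hotplug not implemented.\n");
                    return -ENOSYS;
            }

            /* User-space path: proceed as before. */
            return IRQ_SET_MASK_OK;
    }
]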
Signed-off-by: Magnus Karlsson <magnus.karlsson at intel.com>
---
arch/arm/mach-axxia/axxia-gic.c | 71 ++++++++++++++++++++++++++++-----------
1 file changed, 52 insertions(+), 19 deletions(-)
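[Editor's note: the repeated open-coded cluster arithmetic is
factored into the new on_same_cluster() helper added below. A
standalone user-space sketch of the same arithmetic, assuming a
value of 4 for CORES_PER_CLUSTER (an illustrative assumption, not
taken from the driver):

    #include <stdbool.h>
    #include <stdio.h>

    #define CORES_PER_CLUSTER 4     /* assumed value, illustration only */

    static bool on_same_cluster(unsigned int pcpu1, unsigned int pcpu2)
    {
            /* Two physical cpu ids share a cluster iff the quotients match. */
            return pcpu1 / CORES_PER_CLUSTER == pcpu2 / CORES_PER_CLUSTER;
    }

    int main(void)
    {
            printf("%d\n", on_same_cluster(1, 3));  /* 1: both in cluster 0 */
            printf("%d\n", on_same_cluster(3, 4));  /* 0: clusters 0 and 1 */
            return 0;
    }
]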
diff --git a/arch/arm/mach-axxia/axxia-gic.c b/arch/arm/mach-axxia/axxia-gic.c
index a87ef9c..cdfc8fa 100644
--- a/arch/arm/mach-axxia/axxia-gic.c
+++ b/arch/arm/mach-axxia/axxia-gic.c
@@ -506,6 +506,37 @@ static void gic_clr_affinity_remote(void *info)
}
+static bool on_same_cluster(u32 pcpu1, u32 pcpu2)
+{
+ return pcpu1 / CORES_PER_CLUSTER == pcpu2 / CORES_PER_CLUSTER;
+}
+
+static int exec_remote_set_affinity(bool clr_affinity, u32 cpu,
+ struct irq_data *d,
+ const struct cpumask *mask_val,
+ bool force)
+{
+ int ret = IRQ_SET_MASK_OK;
+
+ if (force == false) {
+ gic_rpc_data.d = d;
+ gic_rpc_data.mask_val = mask_val;
+ if (clr_affinity == true) {
+ gic_rpc_data.oldcpu = cpu;
+ gic_rpc_data.func_mask |= CLR_AFFINITY;
+ } else {
+ gic_rpc_data.cpu = cpu;
+ gic_rpc_data.func_mask |= SET_AFFINITY;
+ }
+
+ } else {
+ pr_warn("Set affinity for hotplug not implemented.\n");
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
static int gic_set_affinity(struct irq_data *d,
const struct cpumask *mask_val,
bool force)
@@ -513,6 +544,7 @@ static int gic_set_affinity(struct irq_data *d,
unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
u32 pcpu = cpu_logical_map(smp_processor_id());
unsigned int irqid = gic_irq(d);
+ int ret = IRQ_SET_MASK_OK;
BUG_ON(!irqs_disabled());
@@ -538,16 +570,14 @@ static int gic_set_affinity(struct irq_data *d,
* cluster as the cpu we're currently running on, set the IRQ
* affinity directly. Otherwise, use the RPC mechanism.
*/
- if ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) ==
- (pcpu / CORES_PER_CLUSTER)) {
- _gic_set_affinity(d, mask_val, false);
+ if (on_same_cluster(cpu_logical_map(cpu), pcpu))
+ _gic_set_affinity(d, mask_val, false);
- } else {
+ else {
+ ret = exec_remote_set_affinity(false, cpu, d, mask_val, force);
- gic_rpc_data.func_mask |= SET_AFFINITY;
- gic_rpc_data.cpu = cpu;
- gic_rpc_data.d = d;
- gic_rpc_data.mask_val = mask_val;
+ if (ret != IRQ_SET_MASK_OK)
+ return ret;
}
/*
@@ -555,23 +585,26 @@ static int gic_set_affinity(struct irq_data *d,
* different than the prior cluster, clear the IRQ affinity
* on the old cluster.
*/
- if ((irqid != IRQ_PMU) && ((cpu_logical_map(cpu) / CORES_PER_CLUSTER) !=
- (irq_cpuid[irqid] / CORES_PER_CLUSTER))) {
+ if (!on_same_cluster(cpu_logical_map(cpu), irq_cpuid[irqid]) && (irqid != IRQ_PMU)) {
/*
* If old cpu assignment falls within the same cluster as
* the cpu we're currently running on, clear the IRQ affinity
* directly. Otherwise, use RPC mechanism.
*/
- if ((irq_cpuid[irqid] / CORES_PER_CLUSTER) ==
- (pcpu / CORES_PER_CLUSTER)) {
- _gic_set_affinity(d, mask_val, true);
- } else {
- gic_rpc_data.func_mask |= CLR_AFFINITY;
- gic_rpc_data.oldcpu = irq_cpuid[irqid];
- gic_rpc_data.d = d;
- gic_rpc_data.mask_val = mask_val;
+ if (on_same_cluster(irq_cpuid[irqid], pcpu))
+ _gic_set_affinity(d, mask_val, true);
+ else
+ ret = exec_remote_set_affinity(true,
+ get_logical_index(irq_cpuid[irqid]), d, mask_val, force);
+ if (ret != IRQ_SET_MASK_OK) {
+ /* Need to back out the set operation */
+ if (on_same_cluster(cpu_logical_map(cpu), pcpu))
+ _gic_set_affinity(d, mask_val, true);
+ else
+ exec_remote_set_affinity(true, cpu, d, mask_val, force);
+
+ return ret;
}
- schedule_work_on(0, &axxia_gic_affinity_work);
}
--
1.7.9.5
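[Editor's note: the error handling added in the last hunk follows a
small transactional pattern: the affinity is first set on the new
cluster, and if clearing it on the old cluster then fails, the set
operation is backed out before the error is propagated. A distilled
user-space sketch of that pattern; all function names here are
hypothetical stand-ins, not driver code:

    #include <stdio.h>

    /* Hypothetical stand-ins for the two halves of the affinity move. */
    static int set_on_new_cluster(void)   { return 0; }
    static int clear_on_old_cluster(void) { return -1; /* simulate failure */ }
    static void undo_set_on_new(void)     { puts("backing out set operation"); }

    static int move_irq(void)
    {
            int ret = set_on_new_cluster();         /* step 1 */

            if (ret)
                    return ret;

            ret = clear_on_old_cluster();           /* step 2 */
            if (ret) {
                    undo_set_on_new();              /* roll back step 1 */
                    return ret;
            }
            return 0;
    }

    int main(void)
    {
            printf("move_irq() = %d\n", move_irq());
            return 0;
    }
]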