[linux-yocto] [PATCH v2 05/39] arch/arm: arm changes to support the axxia BSP
Cristian Bercaru
cristian.bercaru at windriver.com
Thu May 21 02:40:30 PDT 2015
From: Charlie Paul <cpaul.windriver at gmail.com>
These files were changed to add core arch/arm support for the LSI Axxia 5500 board: the ARCH_AXXIA Kconfig entry and mach-axxia hook-up, RapidIO and CPU-hotplug power-down options, an Axxia-specific TEXT_OFFSET and page-table setup, an Axxia WFE hook in the ticket-spinlock slow path, and Cortex-A5/A7/A15 PMU probe entries.
Signed-off-by: Charlie Paul <cpaul.windriver at gmail.com>
---
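Note on the top-level Makefile hunk below: it collapses the nested ifeq
chain into a single GNU make $(if ...) expression. A minimal sketch of the
resulting behaviour follows; the example paths are only illustrative and
are not taken from the patch:

    # In-tree build: KBUILD_SRC is empty, so the else branch of $(if) is
    # taken and srctree/objtree both become the absolute current
    # directory, e.g. /work/linux.
    #
    # Out-of-tree build: "make O=<dir>" re-invokes make in <dir> with
    # KBUILD_SRC pointing at the source tree, so srctree becomes that
    # absolute source path while objtree is the build directory.
    srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
    objtree := $(CURDIR)

Unlike the removed code, both variables are now always absolute paths
rather than "." or "..".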
Makefile | 14 +------
arch/arm/Kconfig | 84 ++++++++++++++++++++++++++++++++++++-
arch/arm/Kconfig.debug | 4 ++
arch/arm/Makefile | 3 +-
arch/arm/include/asm/futex.h | 38 +++++++++++++----
arch/arm/include/asm/kmap_types.h | 5 +++
arch/arm/include/asm/spinlock.h | 6 +++
arch/arm/kernel/asm-offsets.c | 10 +++--
arch/arm/kernel/head.S | 8 ++++
arch/arm/kernel/irq.c | 2 +-
arch/arm/kernel/perf_event_cpu.c | 8 +++-
arch/arm/kernel/perf_event_v7.c | 11 ++++-
arch/arm/mm/mmap.c | 2 +-
arch/arm/tools/mach-types | 1 +
init/Kconfig | 6 +++
init/main.c | 11 +++++
16 files changed, 181 insertions(+), 32 deletions(-)
diff --git a/Makefile b/Makefile
index 4b17ba4..d9d04e4 100644
--- a/Makefile
+++ b/Makefile
@@ -195,18 +195,8 @@ else
_all: modules
endif
-ifeq ($(KBUILD_SRC),)
- # building in the source tree
- srctree := .
-else
- ifeq ($(KBUILD_SRC)/,$(dir $(CURDIR)))
- # building in a subdirectory of the source tree
- srctree := ..
- else
- srctree := $(KBUILD_SRC)
- endif
-endif
-objtree := .
+srctree := $(if $(KBUILD_SRC),$(KBUILD_SRC),$(CURDIR))
+objtree := $(CURDIR)
src := $(srctree)
obj := $(objtree)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 97d07ed..59268d1e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -354,6 +354,29 @@ config ARCH_VERSATILE
help
This enables support for ARM Ltd Versatile board.
+config ARCH_AXXIA
+ bool "LSI Axxia family"
+ select ARCH_PHYS_ADDR_T_64BIT
+ select ARCH_DMA_ADDR_T_64BIT
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARM_AMBA
+ select COMMON_CLK
+ select CLKDEV_LOOKUP
+ select CLKSRC_MMIO
+ select GENERIC_CLOCKEVENTS
+ select HAVE_CLK
+ select HAVE_PATA_PLATFORM
+ select ARM_TIMER_SP804
+ select ICST
+ select NEED_MACH_IO_H
+ select ZONE_DMA
+ select PCI
+ select PCI_DOMAINS if PCI
+ select ARCH_SUPPORTS_MSI if PCI
+ select HAS_RAPIDIO
+ help
+ This enables support for the LSI Axxia boards.
+
config ARCH_AT91
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
@@ -956,6 +979,8 @@ source "arch/arm/mach-ux500/Kconfig"
source "arch/arm/mach-versatile/Kconfig"
+source "arch/arm/mach-axxia/Kconfig"
+
source "arch/arm/mach-vexpress/Kconfig"
source "arch/arm/plat-versatile/Kconfig"
@@ -1299,6 +1324,19 @@ source "drivers/pci/pcie/Kconfig"
source "drivers/pcmcia/Kconfig"
+config HAS_RAPIDIO
+ bool
+ default n
+
+config RAPIDIO
+ bool "RapidIO support"
+ depends on HAS_RAPIDIO || PCI
+ help
+ If you say Y here, the kernel will include drivers and
+ infrastructure code to support RapidIO interconnect devices.
+
+source "drivers/rapidio/Kconfig"
+
endmenu
menu "Kernel Features"
@@ -1466,12 +1504,46 @@ config NR_CPUS
depends on SMP
default "4"
+menu "Support for hot-pluggable CPUs"
+
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
- depends on SMP
+ depends on SMP && HOTPLUG
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu.
+choice
+ prompt "CPU Power Down Mode"
+ default HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ help
+ This is used to select how the CPU is going to be powered down. If LOW POWER
+ is selected, the CPU enters a WFI state and waits for an interrupt to
+ wake up. If COMPLETE POWER DOWN is selected, the CPU power is turned off. The
+ only way to power the CPU back on is to execute a command.
+
+config HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ bool "Power off the CPU"
+ help
+ This will power off the CPU completely. The irqs are migrated
+ to another CPU.
+
+config HOTPLUG_CPU_LOW_POWER
+ bool "Low Power CPU (wfi)"
+ help
+ This will put the CPU into a low-power WFI mode. When an interrupt
+ is received the CPU will power on again.
+
+endchoice
+
+config HOTPLUG_CPU_L2_POWER_DOWN
+ bool "Power Off L2 Cache"
+ depends on HOTPLUG_CPU_COMPLETE_POWER_DOWN
+ default n if HOTPLUG_CPU_LOW_POWER
+ help
+ Select this if you want to power down the L2 cache when
+ all CPUs of a cluster have been powered off.
+
+endmenu
config ARM_PSCI
bool "Support for the ARM Power State Coordination Interface (PSCI)"
@@ -1483,6 +1555,16 @@ config ARM_PSCI
0022A ("Power State Coordination Interface System Software on
ARM processors").
+config LOCAL_TIMERS
+ bool "Use local timer interrupts"
+ depends on SMP
+ default y
+ help
+ Enable support for local timers on SMP platforms, rather than the
+ legacy IPI broadcast method. Local timers allow the system
+ accounting to be spread across the timer interval, preventing a
+ "thundering herd" at every timer tick.
+
# The GPIO number here must be sorted by descending number. In case of
# a multiplatform kernel, we just want the highest value required by the
# selected platforms.
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 5ddd490..abe631f 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -126,6 +126,10 @@ choice
bool "Kernel low-level debugging on sama5d4"
depends on HAVE_AT91_DBGU2
+ config DEBUG_LL_AXXIA_UART0
+ bool "Kernel low-level debugging via UART0"
+ depends on ARCH_AXXIA
+
config DEBUG_BCM2835
bool "Kernel low-level debugging on BCM2835 PL011 UART"
depends on ARCH_BCM2835
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 3f3aa4f..4957b67 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -139,7 +139,8 @@ endif
textofs-$(CONFIG_ARCH_MSM7X30) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8X60) := 0x00208000
textofs-$(CONFIG_ARCH_MSM8960) := 0x00208000
-textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
+# Orig textofs-$(CONFIG_ARCH_AXXIA) := 0x00308000
+textofs-$(CONFIG_ARCH_AXXIA) := 0x00408000
# Machine directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 53e69da..e0413f5 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -3,6 +3,11 @@
#ifdef __KERNEL__
+#if defined(CONFIG_CPU_USE_DOMAINS) && defined(CONFIG_SMP)
+/* ARM doesn't provide unprivileged exclusive memory accessors */
+#include <asm-generic/futex.h>
+#else
+
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
@@ -45,7 +50,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
-
+ /* Setup memory barrier */
smp_mb();
/* Prefetching cannot fault */
prefetchw(uaddr);
@@ -61,6 +66,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
: "=&r" (ret), "=&r" (val)
: "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
: "cc", "memory");
+ /* Setup memory barrier */
smp_mb();
*uval = val;
@@ -110,7 +116,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
int op = (encoded_op >> 28) & 7;
int cmp = (encoded_op >> 24) & 15;
@@ -150,17 +156,31 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
if (!ret) {
switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
+ case FUTEX_OP_CMP_EQ:
+ ret = (oldval == cmparg);
+ break;
+ case FUTEX_OP_CMP_NE:
+ ret = (oldval != cmparg);
+ break;
+ case FUTEX_OP_CMP_LT:
+ ret = (oldval < cmparg);
+ break;
+ case FUTEX_OP_CMP_GE:
+ ret = (oldval >= cmparg);
+ break;
+ case FUTEX_OP_CMP_LE:
+ ret = (oldval <= cmparg);
+ break;
+ case FUTEX_OP_CMP_GT:
+ ret = (oldval > cmparg);
+ break;
+ default:
+ ret = -ENOSYS;
}
}
return ret;
}
+#endif /* !(CPU_USE_DOMAINS && SMP) */
#endif /* __KERNEL__ */
#endif /* _ASM_ARM_FUTEX_H */
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..3e527f9 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,11 @@
/*
* This is the "bare minimum". AIO seems to require this.
*/
+#if (NR_CPUS > 15)
+/* Prevent overlap between fixmap mapping and CPU vector page for 16th core */
+#define KM_TYPE_NR 15
+#else
#define KM_TYPE_NR 16
+#endif
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 0fa4184..f1dbe83 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -73,7 +73,12 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
: "cc");
while (lockval.tickets.next != lockval.tickets.owner) {
+#ifdef CONFIG_ARCH_AXXIA
+ extern void __axxia_arch_wfe(void);
+ __axxia_arch_wfe();
+#else
wfe();
+#endif
lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
}
@@ -126,6 +131,7 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
struct __raw_tickets tickets = READ_ONCE(lock->tickets);
+
return (tickets.next - tickets.owner) > 1;
}
#define arch_spin_is_contended arch_spin_is_contended
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 2d2d608..462894d 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1995-2003 Russell King
* 2001-2002 Keith Owens
- *
+ *
* Generate definitions needed by assembly language modules.
* This code generates raw asm output which is post-processed to extract
* and format the required data.
@@ -49,10 +49,12 @@
#error Your compiler is too buggy; it is known to miscompile kernels.
#error Known good compilers: 3.3, 4.x
#endif
+/*
#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
#error Your compiler is too buggy; it is known to miscompile kernels
#error and result in filesystem corruption and oopses.
#endif
+*/
#endif
int main(void)
@@ -127,9 +129,9 @@ int main(void)
DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm));
DEFINE(VMA_VM_FLAGS, offsetof(struct vm_area_struct, vm_flags));
BLANK();
- DEFINE(VM_EXEC, VM_EXEC);
+ DEFINE(VM_EXEC, VM_EXEC);
BLANK();
- DEFINE(PAGE_SZ, PAGE_SIZE);
+ DEFINE(PAGE_SZ, PAGE_SIZE);
BLANK();
DEFINE(SYS_ERROR0, 0x9f0000);
BLANK();
@@ -210,5 +212,5 @@ int main(void)
#endif
DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
#endif
- return 0;
+ return 0;
}
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 664eee8..a0e9386 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -52,8 +52,13 @@
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
.macro pgtbl, rd, phys
+#ifdef CONFIG_ARCH_AXXIA
+ ldr \rd, =(TEXT_OFFSET - PG_DIR_SIZE)
+ add \rd, \rd, \phys
+#else
add \rd, \phys, #TEXT_OFFSET
sub \rd, \rd, #PG_DIR_SIZE
+#endif
.endm
/*
@@ -301,6 +306,9 @@ __create_page_tables:
#endif
#else
orr r3, r3, #PMD_SECT_XN
+#ifdef CONFIG_ARCH_AXXIA
+ orr r7, r7, #0x20
+#endif
str r3, [r0], #4
#endif
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ad857bad..e58e12c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -158,7 +158,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
c = irq_data_get_irq_chip(d);
if (!c->irq_set_affinity)
pr_debug("IRQ%u: unable to set affinity\n", d->irq);
- else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
cpumask_copy(d->affinity, affinity);
return ret;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index dd9acc9..b422b89 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -199,6 +199,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
for_each_possible_cpu(cpu) {
struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
+
raw_spin_lock_init(&events->pmu_lock);
events->percpu_pmu = cpu_pmu;
}
@@ -261,6 +262,9 @@ static const struct pmu_probe_info pmu_probe_table[] = {
ARM_PMU_PROBE(ARM_CPU_PART_ARM11MPCORE, armv6mpcore_pmu_init),
ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A8, armv7_a8_pmu_init),
ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A9, armv7_a9_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A5, armv7_a5_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A15, armv7_a15_pmu_init),
+ ARM_PMU_PROBE(ARM_CPU_PART_CORTEX_A7, armv7_a7_pmu_init),
XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V1, xscale1pmu_init),
XSCALE_PMU_PROBE(ARM_CPU_XSCALE_ARCH_V2, xscale2pmu_init),
{ /* sentinel value */ }
@@ -310,8 +314,8 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
cpu_pmu = pmu;
cpu_pmu->plat_device = pdev;
-
- if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
+ of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node);
+ if (node && of_id) {
init_fn = of_id->data;
ret = init_fn(pmu);
} else {
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770..ddc267f 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -97,6 +97,7 @@ enum armv7_a5_perf_types {
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
+ ARMV7_A15_PERFCTR_CPU_CYCLES = 0x11,
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
@@ -288,7 +289,7 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
*/
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
PERF_MAP_ALL_UNSUPPORTED,
- [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
+ [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_A15_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
@@ -537,6 +538,7 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
static inline u32 armv7_pmnc_read(void)
{
u32 val;
+
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
@@ -567,6 +569,7 @@ static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
static inline void armv7_pmnc_select_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
isb();
}
@@ -618,24 +621,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
static inline void armv7_pmnc_enable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
}
static inline void armv7_pmnc_disable_counter(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
}
static inline void armv7_pmnc_enable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
}
static inline void armv7_pmnc_disable_intens(int idx)
{
u32 counter = ARMV7_IDX_TO_COUNTER(idx);
+
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
isb();
/* Clear the overflow flag in case an interrupt is pending. */
@@ -1144,7 +1151,9 @@ static void krait_write_pmresrn(int n, u32 val)
static u32 krait_read_vpmresr0(void)
{
u32 val;
+
asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
+
return val;
}
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5e85ed3..adf24fd 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -11,7 +11,7 @@
#include <linux/random.h>
#include <asm/cachetype.h>
-#define COLOUR_ALIGN(addr,pgoff) \
+#define COLOUR_ALIGN(addr, pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
diff --git a/arch/arm/tools/mach-types b/arch/arm/tools/mach-types
index a10297d..1794ad4 100644
--- a/arch/arm/tools/mach-types
+++ b/arch/arm/tools/mach-types
@@ -1007,3 +1007,4 @@ eco5_bx2 MACH_ECO5_BX2 ECO5_BX2 4572
eukrea_cpuimx28sd MACH_EUKREA_CPUIMX28SD EUKREA_CPUIMX28SD 4573
domotab MACH_DOMOTAB DOMOTAB 4574
pfla03 MACH_PFLA03 PFLA03 4575
+axxia MACH_AXXIA AXXIA 4173
diff --git a/init/Kconfig b/init/Kconfig
index e70bf19..f3c8418 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1333,6 +1333,12 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
the unaligned access emulation.
see arch/parisc/kernel/unaligned.c for reference
+config HOTPLUG
+ def_bool y
+
+config GENERIC_IRQ_LEGACY
+ def_bool y
+
config HAVE_PCSPKR_PLATFORM
bool
diff --git a/init/main.c b/init/main.c
index 4f5742f..a83dc14 100644
--- a/init/main.c
+++ b/init/main.c
@@ -504,6 +504,17 @@ asmlinkage __visible void __init start_kernel(void)
char *command_line;
char *after_dashes;
+#if defined(CONFIG_ARCH_AXXIA) && defined(DEBUG_LL)
+ {
+ *(unsigned long *)(0xf0080000 + 0x24) = 13;
+ *(unsigned long *)(0xf0080000 + 0x28) = 1;
+ *(unsigned long *)(0xf0080000 + 0x2c) = 0x70;
+ *(unsigned long *)(0xf0080000 + 0x30) = 0x301;
+ *(unsigned long *)(0xf0080000 + 0x34) = 0;
+ *(unsigned long *)(0xf0080000 + 0x38) = 0x700;
+ }
+#endif /* CONFIG_ARCH_AXXIA && DEBUG_LL */
+
/*
* Need to run as early as possible, to initialize the
* lockdep hash:
--
1.7.9.5