[linux-yocto] [PATCH 05/38] arch/arm/drivers/dma: Changes to support the axxia BSP
Cristian Bercaru
cristian.bercaru at windriver.com
Wed May 20 12:23:48 PDT 2015
From: Charlie Paul <cpaul.windriver at gmail.com>
The DMA Kconfig and Makefile are updated and the lsi-dma32 driver
is added to support the LSI Axxia 5500 board.
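
For reviewers, a minimal sketch of how a client can exercise this
controller through the generic dmaengine API. This is illustrative
only (the function name and the busy-wait completion are assumptions,
not part of this patch):

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    /* Request any memcpy-capable channel and run a single transfer. */
    static int axxia_dma_copy_example(dma_addr_t dst, dma_addr_t src,
                                      size_t len)
    {
            dma_cap_mask_t mask;
            struct dma_chan *chan;
            struct dma_async_tx_descriptor *tx;
            dma_cookie_t cookie;

            dma_cap_zero(mask);
            dma_cap_set(DMA_MEMCPY, mask);
            chan = dma_request_channel(mask, NULL, NULL);
            if (!chan)
                    return -ENODEV;

            tx = chan->device->device_prep_dma_memcpy(chan, dst, src,
                                                      len, DMA_CTRL_ACK);
            if (!tx) {
                    dma_release_channel(chan);
                    return -ENOMEM;
            }

            cookie = dmaengine_submit(tx);
            dma_async_issue_pending(chan);

            /* Busy-wait only to keep the example short; a real client
             * should use a completion callback. */
            while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
                   DMA_IN_PROGRESS)
                    cpu_relax();

            dma_release_channel(chan);
            return 0;
    }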
Signed-off-by: Charlie Paul <cpaul.windriver at gmail.com>
---
drivers/dma/Kconfig | 9 +
drivers/dma/Makefile | 1 +
drivers/dma/lsi-dma32.c | 883 +++++++++++++++++++++++++++++++++++++++++++++++
drivers/dma/lsi-dma32.h | 221 ++++++++++++
4 files changed, 1114 insertions(+)
create mode 100644 drivers/dma/lsi-dma32.c
create mode 100644 drivers/dma/lsi-dma32.h
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f2b2c4e..fd2b3d7 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -114,6 +114,15 @@ config AT_XDMAC
help
Support the Atmel XDMA controller.
+config LSI_DMA
+ tristate "LSI General Purpose DMA support"
+ select DMA_ENGINE
+ select DMA_VIRTUAL_CHANNELS
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ help
+ Enable support for the LSI General Purpose DMA controller found
+ on ACP34xx, AXM25xx and AXM55xx devices.
+
config FSL_DMA
tristate "Freescale Elo series DMA support"
depends on FSL_SOC
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2022b54..a9fa822 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
+obj-$(CONFIG_LSI_DMA) += lsi-dma32.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
diff --git a/drivers/dma/lsi-dma32.c b/drivers/dma/lsi-dma32.c
new file mode 100644
index 0000000..6d1cc1a
--- /dev/null
+++ b/drivers/dma/lsi-dma32.c
@@ -0,0 +1,883 @@
+/*
+ * Driver for the LSI DMA controller DMA-32.
+ *
+ * The driver is based on:
+ *
+ * lsi-dma32.c -
+ * lsi-dma.c - Copyright 2011 Mentor Graphics
+ * acp_gpdma.c - Copyright (c) 2011, Ericsson AB
+ * Niclas Bengtsson <niklas.x.bengtsson at ericsson.com>
+ * Kerstin Jonsson <kerstin.jonsson at ericsson.com>
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ * You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+#include <linux/export.h>
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <asm/page.h>
+#include <linux/bitops.h>
+#include <linux/atomic.h>
+#include <linux/sizes.h>
+#include "virt-dma.h"
+#include "lsi-dma32.h"
+
+#ifdef DEBUG
+#define engine_dbg(engine, fmt, ...) \
+ do { \
+ struct gpdma_engine *_e = engine; \
+ (void)_e; \
+ pr_debug("dma0: " fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define ch_dbg(dmac, fmt, ...) \
+ do { \
+ struct gpdma_channel *_c = dmac; \
+ pr_debug("dma0ch%d: [%s] " fmt, \
+ _c->id, __func__, ##__VA_ARGS__); \
+ } while (0)
+#else
+#define engine_dbg(engine, fmt, ...) do {} while (0)
+#define ch_dbg(dmac, fmt, ...) do {} while (0)
+#endif
+
+static unsigned int burst = 5;
+module_param(burst, uint, 0644);
+MODULE_PARM_DESC(burst,
+ "Preferred burst setting (0=SINGLE,3=INCR4,5=INCR8,7=INCR16)");
+
+static void reset_channel(struct gpdma_channel *dmac)
+{
+ const int WAIT = 1024;
+ int i;
+
+ /* Pause channel */
+ writel(DMA_STATUS_CH_PAUS_WR_EN | DMA_STATUS_CH_PAUSE,
+ dmac->base+DMA_STATUS);
+ /* Memory Barrier */
+ wmb();
+
+ /* Disable channel */
+ writel(0, dmac->base+DMA_CHANNEL_CONFIG);
+ for (i = 0; readl(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
+ cpu_relax();
+ if (i == WAIT)
+ ch_dbg(dmac, "Failed to DISABLE channel\n");
+
+ /* Clear FIFO */
+ writel(DMA_CONFIG_CLEAR_FIFO, dmac->base+DMA_CHANNEL_CONFIG);
+ for (i = 0; readl(dmac->base+DMA_CHANNEL_CONFIG) && i < WAIT; i++)
+ cpu_relax();
+ if (i == WAIT)
+ ch_dbg(dmac, "Failed to clear FIFO\n");
+}
+
+static void soft_reset(struct gpdma_engine *engine)
+{
+ int i;
+ u32 cfg;
+
+ /* Reset all channels */
+ for (i = 0; i < engine->chip->num_channels; i++)
+ reset_channel(&engine->channel[i]);
+
+ /* Reset GPDMA by writing Magic Number to reset reg */
+ writel(GPDMA_MAGIC, engine->gbase + SOFT_RESET);
+ /* Memory Barrier */
+ wmb();
+
+ cfg = (engine->pool.phys & 0xfff00000) | GEN_CONFIG_EXT_MEM;
+
+ if (engine->chip->flags & LSIDMA_EDGE_INT) {
+ for (i = 0; i < engine->chip->num_channels; i++)
+ cfg |= GEN_CONFIG_INT_EDGE(i);
+ engine_dbg(engine, "Using edge-triggered interrupts\n");
+ }
+ writel(cfg, engine->gbase + GEN_CONFIG);
+ engine_dbg(engine, "engine->pool.phys & 0xfff00000 == %llx\n",
+ (u64)(engine->pool.phys & 0xfff00000));
+
+ engine->ch_busy = 0;
+}
+
+static int alloc_desc_table(struct gpdma_engine *engine)
+{
+ /*
+ * For controllers that don't support full descriptor addresses, all
+ * descriptors must live in the same 1 MB page, i.e. address bits
+ * 31..20 must be the same for all descriptors.
+ */
+ u32 order = 20 - PAGE_SHIFT;
+ int i;
+
+ if (engine->chip->flags & LSIDMA_NEXT_FULL) {
+ /*
+ * The controller supports full descriptor addresses, so the
+ * descriptor block needs no special alignment.
+ */
+ order = get_order(GPDMA_MAX_DESCRIPTORS *
+ sizeof(struct gpdma_desc));
+ }
+
+ engine->pool.va = (struct gpdma_desc *)
+ __get_free_pages(GFP_KERNEL|GFP_DMA, order);
+ if (!engine->pool.va)
+ return -ENOMEM;
+ engine->pool.order = order;
+ engine->pool.phys = virt_to_phys(engine->pool.va);
+ engine_dbg(engine, "order=%d pa=%#llx va=%p\n",
+ engine->pool.order, (u64)engine->pool.phys, engine->pool.va);
+
+ INIT_LIST_HEAD(&engine->free_list);
+ for (i = 0; i < GPDMA_MAX_DESCRIPTORS; i++) {
+ struct gpdma_desc *desc = &engine->pool.va[i];
+
+ async_tx_ack(&desc->vdesc.tx);
+ desc->engine = engine;
+ list_add_tail(&desc->vdesc.node, &engine->free_list);
+ }
+
+ return 0;
+}
+
+static void free_desc_table(struct gpdma_engine *engine)
+{
+ if (engine->pool.va)
+ free_pages((unsigned long)engine->pool.va, engine->pool.order);
+}
+
+static struct gpdma_desc *get_descriptor(struct gpdma_engine *engine)
+{
+ unsigned long flags;
+ struct gpdma_desc *new = NULL, *desc, *tmp;
+
+ spin_lock_irqsave(&engine->lock, flags);
+ list_for_each_entry_safe(desc, tmp, &engine->free_list, vdesc.node) {
+ if (async_tx_test_ack(&desc->vdesc.tx)) {
+ list_del(&desc->vdesc.node);
+ new = desc;
+ new->chain = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&engine->lock, flags);
+
+ return new;
+}
+
+/**
+ * init_descriptor - Fill out all descriptor fields
+ */
+static void init_descriptor(struct gpdma_desc *desc,
+ dma_addr_t src, u32 src_acc,
+ dma_addr_t dst, u32 dst_acc,
+ size_t len)
+{
+ u32 src_count = len >> src_acc;
+ u32 dst_count = len >> dst_acc;
+ u32 rot_len = (2 * (1 << src_acc)) - 1;
+
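+ /*
+ * src_acc/dst_acc encode the access width as log2(bytes), so the
+ * X counters below are expressed in units of the access width
+ * rather than bytes. rot_len (2 * access width - 1) is written
+ * into the rotator-length field of src_access.
+ */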
+ BUG_ON(src_count * (1<<src_acc) != len);
+ BUG_ON(dst_count * (1<<dst_acc) != len);
+
+ desc->src = src;
+ desc->dst = dst;
+
+ desc->hw.src_x_ctr = cpu_to_le16(src_count - 1);
+ desc->hw.src_y_ctr = 0;
+ desc->hw.src_x_mod = cpu_to_le32(1 << src_acc);
+ desc->hw.src_y_mod = 0;
+ desc->hw.src_addr = cpu_to_le32(src & 0xffffffff);
+ desc->hw.src_data_mask = ~0;
+ desc->hw.src_access = cpu_to_le16((rot_len << 6) |
+ (src_acc << 3) |
+ (burst & 7));
+ desc->hw.dst_access = cpu_to_le16((dst_acc << 3) |
+ (burst & 7));
+ desc->hw.ch_config = cpu_to_le32(DMA_CONFIG_ONE_SHOT(1));
+ desc->hw.next_ptr = 0;
+ desc->hw.dst_x_ctr = cpu_to_le16(dst_count - 1);
+ desc->hw.dst_y_ctr = 0;
+ desc->hw.dst_x_mod = cpu_to_le32(1 << dst_acc);
+ desc->hw.dst_y_mod = 0;
+ desc->hw.dst_addr = cpu_to_le32(dst & 0xffffffff);
+}
+
+static phys_addr_t desc_to_paddr(const struct gpdma_channel *dmac,
+ const struct gpdma_desc *desc)
+{
+ phys_addr_t paddr = virt_to_phys(&desc->hw);
+
+ WARN_ON(paddr & 0xf);
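+ /*
+ * Controllers with LSIDMA_NEXT_FULL take the full descriptor
+ * address with bit 3 set; others only take address bits 19..0,
+ * which works because all descriptors share one 1 MB page (see
+ * alloc_desc_table()).
+ */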
+ if (dmac->engine->chip->flags & LSIDMA_NEXT_FULL)
+ paddr |= 0x8;
+ else
+ paddr &= 0xfffff;
+
+ return paddr;
+}
+
+static void free_descriptor(struct virt_dma_desc *vd)
+{
+ struct gpdma_desc *desc = to_gpdma_desc(vd);
+ struct gpdma_engine *engine = desc->engine;
+ unsigned long flags;
+
+ BUG_ON(desc == NULL);
+
+ spin_lock_irqsave(&engine->lock, flags);
+ while (desc) {
+ list_add_tail(&desc->vdesc.node, &engine->free_list);
+ desc = desc->chain;
+ }
+ spin_unlock_irqrestore(&engine->lock, flags);
+}
+
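+/*
+ * Without per-channel segment registers, the shared GP register holds a
+ * single source/destination segment (address bits 37..32) for the whole
+ * engine, so a descriptor may only start while other channels are busy
+ * if it uses the same segments.
+ */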
+static int segment_match(struct gpdma_engine *engine, struct gpdma_desc *desc)
+{
+ unsigned int gpreg_dma = readl(engine->gpreg);
+ unsigned int seg_src = (gpreg_dma >> 0) & 0x3f;
+ unsigned int seg_dst = (gpreg_dma >> 8) & 0x3f;
+
+ return (seg_src == ((desc->src >> 32) & 0x3f) &&
+ seg_dst == ((desc->dst >> 32) & 0x3f));
+}
+
+static void gpdma_start(struct gpdma_channel *dmac)
+{
+ struct virt_dma_desc *vdesc;
+ struct gpdma_desc *desc;
+ phys_addr_t paddr;
+
+ vdesc = vchan_next_desc(&dmac->vc);
+ if (!vdesc) {
+ clear_bit(dmac->id, &dmac->engine->ch_busy);
+ dmac->active = NULL;
+ return;
+ }
+
+ /* Remove from list and mark as active */
+ list_del(&vdesc->node);
+ desc = to_gpdma_desc(vdesc);
+ dmac->active = desc;
+
+ if (!(dmac->engine->chip->flags & LSIDMA_SEG_REGS)) {
+ /*
+ * No segment registers -> this descriptor's segment bits must
+ * match those of the descriptor running on any other channel.
+ */
+ if (dmac->engine->ch_busy && !segment_match(dmac->engine, desc))
+ return;
+ }
+
+ /* Physical address of descriptor to load */
+ paddr = desc_to_paddr(dmac, desc);
+ writel((u32)paddr, dmac->base + DMA_NXT_DESCR);
+
+ if (dmac->engine->chip->flags & LSIDMA_SEG_REGS) {
+ /* Segment bits [39..32] of descriptor, src and dst addresses */
+ writel(paddr >> 32, dmac->base + DMA_DESCR_ADDR_SEG);
+ writel(desc->src >> 32, dmac->base + DMA_SRC_ADDR_SEG);
+ writel(desc->dst >> 32, dmac->base + DMA_DST_ADDR_SEG);
+ } else {
+ unsigned int seg_src = (desc->src >> 32) & 0x3f;
+ unsigned int seg_dst = (desc->dst >> 32) & 0x3f;
+
+ writel((seg_dst << 8) | seg_src, dmac->engine->gpreg);
+ }
+ /* Memory barrier */
+ wmb();
+ writel(DMA_CONFIG_DSC_LOAD, dmac->base + DMA_CHANNEL_CONFIG);
+ set_bit(dmac->id, &dmac->engine->ch_busy);
+}
+
+static irqreturn_t gpdma_isr_err(int irqno, void *_engine)
+{
+ struct gpdma_engine *engine = _engine;
+ u32 status = readl(engine->gbase + GEN_STAT);
+ u32 ch = (status & GEN_STAT_CH0_ERROR) ? 0 : 1;
+ struct gpdma_channel *dmac = &engine->channel[ch];
+
+ if (0 == (status & (GEN_STAT_CH0_ERROR | GEN_STAT_CH1_ERROR)))
+ return IRQ_NONE;
+
+ /* Read the channel status bits and dump the error */
+ status = readl(dmac->base + DMA_STATUS);
+ pr_err("dma: channel%d error %08x\n", dmac->id, status);
+ /* Clear the error indication */
+ writel(DMA_STATUS_ERROR, dmac->base+DMA_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t gpdma_isr(int irqno, void *_dmac)
+{
+ struct gpdma_channel *dmac = _dmac;
+ struct gpdma_desc *desc = dmac->active;
+ u32 status;
+ u32 error;
+
+ status = readl(dmac->base+DMA_STATUS);
+ error = status & DMA_STATUS_ERROR;
+ writel(DMA_STATUS_CLEAR, dmac->base+DMA_STATUS);
+
+ ch_dbg(dmac, "irq%u channel status %08x, error %08x\n",
+ irqno, status, error);
+
+ WARN_ON((status & DMA_STATUS_CH_ACTIVE) != 0);
+
+ if (error) {
+ if (error & DMA_STATUS_UNALIGNED_ERR) {
+ dev_warn(dmac->engine->dev,
+ "Unaligned transaction on ch%d (status=%#x)\n",
+ dmac->id, status);
+ reset_channel(dmac);
+ } else {
+ dev_warn(dmac->engine->dev,
+ "DMA transaction error on ch%d (status=%#x)\n",
+ dmac->id, status);
+ }
+ }
+
+ BUG_ON(desc == NULL);
+
+ spin_lock(&dmac->vc.lock);
+ vchan_cookie_complete(&desc->vdesc);
+ dmac->active = NULL;
+ if (vchan_next_desc(&dmac->vc)) {
+ gpdma_start(dmac);
+ } else {
+ /* Stop channel */
+ writel(0, dmac->base + DMA_CHANNEL_CONFIG);
+ writel(DMA_CONFIG_CLEAR_FIFO, dmac->base + DMA_CHANNEL_CONFIG);
+ clear_bit(dmac->id, &dmac->engine->ch_busy);
+ }
+ spin_unlock(&dmac->vc.lock);
+
+ return IRQ_HANDLED;
+}
+
+static void flush_channel(struct gpdma_channel *dmac)
+{
+ unsigned long flags;
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&dmac->vc.lock, flags);
+ reset_channel(dmac);
+ if (dmac->active) {
+ free_descriptor(&dmac->active->vdesc);
+ dmac->active = NULL;
+ }
+ vchan_get_all_descriptors(&dmac->vc, &head);
+ spin_unlock_irqrestore(&dmac->vc.lock, flags);
+ vchan_dma_desc_free_list(&dmac->vc, &head);
+}
+
+/*
+ * Perform the soft reset procedure on the DMA engine. Needed
+ * occasionally to work around a nasty bug in the ACP3400 sRIO HW.
+ */
+static ssize_t __ref
+reset_engine(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct gpdma_engine *engine = dev_get_drvdata(dev);
+ int i;
+
+ if (!engine)
+ return -EINVAL;
+
+ /* Disable interrupts and tasklet and acquire each channel lock */
+ for (i = 0; i < engine->chip->num_channels; i++) {
+ struct gpdma_channel *dmac = &engine->channel[i];
+
+ disable_irq(dmac->irq);
+ spin_lock(&dmac->vc.lock);
+ tasklet_disable(&dmac->vc.task);
+ }
+
+ soft_reset(engine);
+
+ for (i = 0; i < engine->chip->num_channels; i++) {
+ struct gpdma_channel *dmac = &engine->channel[i];
+
+ tasklet_enable(&dmac->vc.task);
+ enable_irq(dmac->irq);
+ /* Restart any active jobs */
+ if (dmac->active) {
+ struct gpdma_desc *active = dmac->active;
+
+ dmac->active = NULL;
+ list_add(&active->vdesc.node, &dmac->vc.desc_submitted);
+ if (vchan_issue_pending(&dmac->vc))
+ gpdma_start(dmac);
+ }
+ spin_unlock(&dmac->vc.lock);
+ }
+
+ return count;
+}
+static DEVICE_ATTR(soft_reset, S_IWUSR, NULL, reset_engine);
+
+/*
+ *===========================================================================
+ *
+ * DMA DEVICE INTERFACE
+ *
+ *===========================================================================
+ *
+ */
+
+/**
+ * gpdma_alloc_chan_resources - Allocate resources and return the number of
+ * allocated descriptors.
+ *
+ */
+static int gpdma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+
+ (void) dmac;
+ return 1;
+}
+
+/**
+ * gpdma_free_chan_resources - Release DMA channel's resources.
+ *
+ */
+static void gpdma_free_chan_resources(struct dma_chan *chan)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+
+ (void) dmac;
+}
+
+/**
+ * gpdma_prep_sg - Prepares a transfer using sg lists.
+ *
+ */
+static struct dma_async_tx_descriptor *
+gpdma_prep_sg(struct dma_chan *chan,
+ struct scatterlist *dst_sg, unsigned int dst_nents,
+ struct scatterlist *src_sg, unsigned int src_nents,
+ unsigned long flags)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+ struct gpdma_desc *first = NULL, *prev = NULL, *new;
+ size_t dst_avail, src_avail;
+ dma_addr_t dst, src;
+ u32 src_acc, dst_acc;
+ size_t len;
+
+ if (dst_nents == 0 || src_nents == 0)
+ return NULL;
+
+ if (dst_sg == NULL || src_sg == NULL)
+ return NULL;
+
+ dst_avail = sg_dma_len(dst_sg);
+ src_avail = sg_dma_len(src_sg);
+
+ /* Loop until we run out of entries... */
+ for (;;) {
+ /* Each descriptor can transfer at most 64 KB */
+ len = min_t(size_t, src_avail, dst_avail);
+ len = min_t(size_t, len, (size_t)SZ_64K);
+
+ if (len > 0) {
+ dst = sg_dma_address(dst_sg) +
+ sg_dma_len(dst_sg) - dst_avail;
+ src = sg_dma_address(src_sg) +
+ sg_dma_len(src_sg) - src_avail;
+
+ src_acc = min(ffs((u32)src | len) - 1, 4);
+ dst_acc = min(ffs((u32)dst | len) - 1, 4);
+
+ new = get_descriptor(dmac->engine);
+ if (!new) {
+ ch_dbg(dmac, "ERROR: No descriptor\n");
+ goto fail;
+ }
+
+ init_descriptor(new, src, src_acc, dst, dst_acc, len);
+
+ /* Link descriptors together */
+ if (!first) {
+ first = new;
+ } else {
+ prev->hw.next_ptr = desc_to_paddr(dmac, new);
+ prev->chain = new;
+ }
+ prev = new;
+
+ /* update metadata */
+ dst_avail -= len;
+ src_avail -= len;
+ }
+
+ /* dst: Advance to next sg-entry */
+ if (dst_avail == 0) {
+ /* no more entries: we're done */
+ if (dst_nents == 0)
+ break;
+ /* fetch the next entry: if there are no more: done */
+ dst_sg = sg_next(dst_sg);
+ if (dst_sg == NULL)
+ break;
+
+ dst_nents--;
+ dst_avail = sg_dma_len(dst_sg);
+ }
+
+ /* src: Advance to next sg-entry */
+ if (src_avail == 0) {
+ /* no more entries: we're done */
+ if (src_nents == 0)
+ break;
+ /* fetch the next entry: if there are no more: done */
+ src_sg = sg_next(src_sg);
+ if (src_sg == NULL)
+ break;
+
+ src_nents--;
+ src_avail = sg_dma_len(src_sg);
+ }
+ }
+
+ /* Interrupt on last descriptor in chain */
+ prev->hw.ch_config |= cpu_to_le32(DMA_CONFIG_END);
+
+ return vchan_tx_prep(&dmac->vc, &first->vdesc, flags);
+
+fail:
+ if (first)
+ free_descriptor(&first->vdesc);
+ return NULL;
+}
+
+/**
+ * gpdma_prep_memcpy - Prepares a memcpy operation.
+ *
+ */
+static struct dma_async_tx_descriptor *
+gpdma_prep_memcpy(struct dma_chan *chan,
+ dma_addr_t dst,
+ dma_addr_t src,
+ size_t size,
+ unsigned long dma_flags)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+ struct gpdma_desc *first = NULL, *prev = NULL, *new;
+ u32 src_acc, dst_acc;
+ size_t len;
+
+ if (size == 0)
+ return NULL;
+
+ do {
+ new = get_descriptor(dmac->engine);
+ if (new == NULL) {
+ ch_dbg(dmac, "ERROR: No descriptor\n");
+ goto fail;
+ }
+
+ len = min_t(size_t, size, (size_t)SZ_64K);
+
+ /* Maximize access width based on address and length alignment */
+ src_acc = min(ffs((u32)src | len) - 1, 4);
+ dst_acc = min(ffs((u32)dst | len) - 1, 4);
+
+ init_descriptor(new, src, src_acc, dst, dst_acc, len);
+
+ if (!first) {
+ first = new;
+ } else {
+ prev->hw.next_ptr = desc_to_paddr(dmac, new);
+ prev->chain = new;
+ }
+ prev = new;
+
+ size -= len;
+ src += len;
+ dst += len;
+
+ } while (size > 0);
+
+ prev->hw.ch_config |= cpu_to_le32(DMA_CONFIG_END);
+
+ return vchan_tx_prep(&dmac->vc, &first->vdesc, DMA_CTRL_ACK);
+
+fail:
+ if (first)
+ free_descriptor(&first->vdesc);
+ return NULL;
+}
+
+/**
+ * gpdma_issue_pending - Push pending transactions to hardware.
+ *
+ */
+static void gpdma_issue_pending(struct dma_chan *chan)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dmac->vc.lock, flags);
+ if (vchan_issue_pending(&dmac->vc) && !dmac->active)
+ gpdma_start(dmac);
+ spin_unlock_irqrestore(&dmac->vc.lock, flags);
+}
+
+/**
+ * gpdma_tx_status - Poll for transaction completion. The optional
+ * txstate parameter can be supplied with a pointer to a struct that
+ * receives auxiliary transfer status information; otherwise the call
+ * just returns a simple status code.
+ */
+static enum dma_status gpdma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ return dma_cookie_status(chan, cookie, txstate);
+}
+
+
+/**
+ * gpdma_device_control - Manipulate all pending operations on a
+ * channel. Returns zero or an error code.
+ */
+static int gpdma_device_control(struct dma_chan *chan,
+ enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct gpdma_channel *dmac = to_gpdma_chan(chan);
+
+ if (!dmac)
+ return -EINVAL;
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ flush_channel(dmac);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int setup_channel(struct gpdma_channel *dmac, struct device_node *child)
+{
+ struct gpdma_engine *engine = dmac->engine;
+ int rc;
+
+ dmac->base = engine->iobase + dmac->id * engine->chip->chregs_offset;
+ dev_dbg(engine->dev, "channel%d base @ %p\n", dmac->id, dmac->base);
+
+ /* Find the IRQ line, if it exists in the device tree */
+ dmac->irq = irq_of_parse_and_map(child, 0);
+ dev_dbg(engine->dev, "channel %d, irq %d\n", dmac->id, dmac->irq);
+ rc = devm_request_irq(engine->dev, dmac->irq, gpdma_isr, 0,
+ "lsi-dma", dmac);
+ if (rc) {
+ dev_err(engine->dev, "failed to request_irq, error = %d\n", rc);
+ return rc;
+ }
+ /* Initialize the virt-channel */
+ dmac->vc.desc_free = free_descriptor;
+ vchan_init(&dmac->vc, &engine->dma_device);
+
+ return 0;
+}
+
+static struct lsidma_hw lsi_dma32 = {
+ .num_channels = 2,
+ .chregs_offset = 0x80,
+ .genregs_offset = 0xF00,
+ .flags = (LSIDMA_NEXT_FULL |
+ LSIDMA_SEG_REGS)
+};
+
+static struct lsidma_hw lsi_dma31 = {
+ .num_channels = 4,
+ .chregs_offset = 0x40,
+ .genregs_offset = 0x400,
+ .flags = 0
+};
+
+static const struct of_device_id gpdma_of_ids[] = {
+ {
+ .compatible = "lsi,dma32",
+ .data = &lsi_dma32
+ },
+ {
+ .compatible = "lsi,dma31",
+ .data = &lsi_dma31
+ },
+ {
+ .compatible = "gp-dma,acp-dma",
+ .data = &lsi_dma31
+ },
+ {
+ .compatible = "gp-dma,acp-gpdma",
+ .data = &lsi_dma31
+ },
+ { }
+};
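+/*
+ * Illustrative device tree layout (all values are placeholders, not
+ * taken from this patch): one node per engine with the register block
+ * in "reg" (plus an optional second range for the GP segment register),
+ * the error interrupt at index 1 of "interrupts", and one child node
+ * per channel carrying that channel's interrupt:
+ *
+ *	dma@... {
+ *		compatible = "lsi,dma32";
+ *		reg = <...>;
+ *		interrupts = <...>, <...>;
+ *		channel0 { interrupts = <...>; };
+ *		channel1 { interrupts = <...>; };
+ *	};
+ */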
+
+static int gpdma_of_probe(struct platform_device *op)
+{
+ struct gpdma_engine *engine;
+ struct dma_device *dma;
+ struct device_node *child;
+ struct resource *res;
+ const struct of_device_id *match;
+ int rc = -ENOMEM;
+ int id = 0;
+
+ match = of_match_device(gpdma_of_ids, &op->dev);
+ if (!match)
+ return -EINVAL;
+
+ engine = devm_kzalloc(&op->dev, sizeof(*engine), GFP_KERNEL);
+ if (!engine)
+ return -ENOMEM;
+
+ spin_lock_init(&engine->lock);
+ engine->dev = &op->dev;
+ engine->chip = (struct lsidma_hw *)match->data;
+
+ /* Initialize dma_device struct */
+ dma = &engine->dma_device;
+ dma->dev = &op->dev;
+ dma_cap_zero(dma->cap_mask);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+ dma_cap_set(DMA_SG, dma->cap_mask);
+ dma->copy_align = 2;
+ dma->chancnt = engine->chip->num_channels;
+ dma->device_alloc_chan_resources = gpdma_alloc_chan_resources;
+ dma->device_free_chan_resources = gpdma_free_chan_resources;
+ dma->device_tx_status = gpdma_tx_status;
+ dma->device_prep_dma_memcpy = gpdma_prep_memcpy;
+ dma->device_prep_dma_sg = gpdma_prep_sg;
+ dma->device_issue_pending = gpdma_issue_pending;
+ dma->device_control = gpdma_device_control;
+ INIT_LIST_HEAD(&dma->channels);
+
+ /* Map device I/O memory */
+ res = platform_get_resource(op, IORESOURCE_MEM, 0);
+ engine->iobase = devm_ioremap_resource(&op->dev, res);
+ if (IS_ERR(engine->iobase))
+ return PTR_ERR(engine->iobase);
+ dev_dbg(&op->dev, "mapped base @ %p\n", engine->iobase);
+
+ res = platform_get_resource(op, IORESOURCE_MEM, 1);
+ if (res) {
+ engine->gpreg = devm_ioremap_nocache(&op->dev,
+ res->start,
+ resource_size(res));
+ /* devm_ioremap_nocache() returns NULL on error, not ERR_PTR() */
+ if (!engine->gpreg)
+ return -ENOMEM;
+ dev_dbg(&op->dev, "mapped gpreg @ %p\n", engine->gpreg);
+ }
+
+ /* platform_get_irq() returns a negative errno when missing */
+ engine->err_irq = platform_get_irq(op, 1);
+ if (engine->err_irq > 0) {
+ rc = devm_request_irq(&op->dev, engine->err_irq,
+ gpdma_isr_err, 0, "lsi-dma-err", engine);
+ if (rc) {
+ dev_err(engine->dev, "failed to request irq%d\n",
+ engine->err_irq);
+ engine->err_irq = 0;
+ }
+ }
+
+ /* General registers at a device-specific offset */
+ engine->gbase = engine->iobase + engine->chip->genregs_offset;
+
+ rc = alloc_desc_table(engine);
+ if (rc)
+ return rc;
+
+ /* Setup channels */
+ for_each_child_of_node(op->dev.of_node, child) {
+ struct gpdma_channel *dmac = &engine->channel[id];
+
+ if (id >= engine->chip->num_channels) {
+ dev_dbg(engine->dev, "Too many channels (%d)\n", id);
+ return -ENODEV;
+ }
+
+ dmac->id = id;
+ dmac->engine = engine;
+ rc = setup_channel(dmac, child);
+ if (rc)
+ return rc;
+ ++id;
+ }
+
+ soft_reset(engine);
+
+ rc = dma_async_device_register(&engine->dma_device);
+ if (rc) {
+ dev_err(engine->dev, "unable to register\n");
+ return rc;
+ }
+
+ device_create_file(&op->dev, &dev_attr_soft_reset);
+ dev_set_drvdata(&op->dev, engine);
+
+ return 0;
+}
+
+static int gpdma_of_remove(struct platform_device *op)
+{
+ struct gpdma_engine *engine = dev_get_drvdata(&op->dev);
+
+ dev_dbg(&op->dev, "%s\n", __func__);
+
+ device_remove_file(&op->dev, &dev_attr_soft_reset);
+ dma_async_device_unregister(&engine->dma_device);
+ free_desc_table(engine);
+ dev_set_drvdata(&op->dev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver gpdma_of_driver = {
+ .driver = {
+ .name = "lsi-dma32",
+ .owner = THIS_MODULE,
+ .of_match_table = gpdma_of_ids,
+ },
+ .probe = gpdma_of_probe,
+ .remove = gpdma_of_remove,
+};
+
+module_platform_driver(gpdma_of_driver);
+
+MODULE_DESCRIPTION("LSI DMA driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/dma/lsi-dma32.h b/drivers/dma/lsi-dma32.h
new file mode 100644
index 0000000..c5a2701
--- /dev/null
+++ b/drivers/dma/lsi-dma32.h
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2012 Ericsson AB. All rights reserved.
+ *
+ * Author:
+ * Kerstin Jonsson <kerstin.jonsson at ericsson.com>, Feb 2012
+ *
+ * This is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#ifndef __LSI_DMA32_H
+#define __LSI_DMA32_H
+
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include "virt-dma.h"
+
+#define MAX_GPDMA_CHANNELS 4
+#define GPDMA_MAX_DESCRIPTORS 128
+#define GPDMA_MAGIC 0xABCD1234UL
+
+#define DMA_X_SRC_COUNT 0x00
+#define DMA_Y_SRC_COUNT 0x04
+#define DMA_X_MODIF_SRC 0x08
+#define DMA_Y_MODIF_SRC 0x0c
+#define DMA_SRC_CUR_ADDR 0x10
+#define DMA_SRC_ACCESS 0x14
+#define DMA_SRC_ACCESS_BURST_TYPE (1<<15)
+#define DMA_SRC_ACCESS_TAIL_LENGTH(x) (((x) & 0xF) << 11)
+#define DMA_SRC_ACCESS_ROTATOR_LENGTH(x) (((x) & 0x1F) << 6)
+#define DMA_SRC_ACCESS_SRC_SIZE(x) (((x) & 7) << 3)
+#define DMA_SRC_ACCESS_SRC_BURST(x) (((x) & 7) << 0)
+#define DMA_SRC_MASK 0x18
+#define DMA_X_DST_COUNT 0x1c
+#define DMA_Y_DST_COUNT 0x20
+#define DMA_X_MODIF_DST 0x24
+#define DMA_Y_MODIF_DST 0x28
+#define DMA_DST_CUR_ADDR 0x2C
+#define DMA_DST_ACCESS 0x30
+#define DMA_DST_ACCESS_DST_SIZE(x) (((x) & 7) << 3)
+#define DMA_DST_ACCESS_DST_BURST(x) (((x) & 7) << 0)
+#define DMA_NXT_DESCR 0x34
+#define DMA_CHANNEL_CONFIG 0x38
+#define DMA_CONFIG_DST_SPACE(x) (((x) & 7) << 26)
+#define DMA_CONFIG_SRC_SPACE(x) (((x) & 7) << 23)
+#define DMA_CONFIG_PRIORITY_ROW (1<<21)
+#define DMA_CONFIG_PRIORITY (1<<20)
+#define DMA_CONFIG_LAST_BLOCK (1<<15)
+#define DMA_CONFIG_CLEAR_FIFO (1<<14)
+#define DMA_CONFIG_START_MEM_LOAD (1<<13)
+#define DMA_CONFIG_STOP_DST_EOB (1<<11)
+#define DMA_CONFIG_FULL_DESCR_ADDR (1<<8)
+#define DMA_CONFIG_INT_DST_EOT (1<<7)
+#define DMA_CONFIG_INT_DST_EOB (1<<6)
+#define DMA_CONFIG_WAIT_FOR_TASK_CNT2 (1<<5)
+#define DMA_CONFIG_TASK_CNT2_RESET (1<<4)
+#define DMA_CONFIG_WAIT_FOR_TASK_CNT1 (1<<3)
+#define DMA_CONFIG_TASK_CNT1_RESET (1<<2)
+#define DMA_CONFIG_TX_EN (1<<1)
+#define DMA_CONFIG_CHAN_EN (1<<0)
+#define DMA_STATUS 0x3C
+#define DMA_STATUS_WAIT_TASK_CNT2 (1<<20)
+#define DMA_STATUS_TASK_CNT2_OVERFLOW (1<<19)
+#define DMA_STATUS_WAIT_TASK_CNT1 (1<<18)
+#define DMA_STATUS_TASK_CNT1_OVERFLOW (1<<17)
+#define DMA_STATUS_CH_PAUS_WR_EN (1<<16)
+#define DMA_STATUS_ERR_ACC_DESCR (1<<14)
+#define DMA_STATUS_ERR_ACC_DST (1<<13)
+#define DMA_STATUS_ERR_ACC_SRC (1<<12)
+#define DMA_STATUS_ERR_OVERFLOW (1<<9)
+#define DMA_STATUS_ERR_UNDERFLOW (1<<8)
+#define DMA_STATUS_CH_PAUSE (1<<7)
+#define DMA_STATUS_CH_WAITING (1<<5)
+#define DMA_STATUS_CH_ACTIVE (1<<4)
+#define DMA_STATUS_TR_COMPLETE (1<<3)
+#define DMA_STATUS_BLK_COMPLETE (1<<2)
+#define DMA_STATUS_UNALIGNED_READ (1<<1)
+#define DMA_STATUS_UNALIGNED_WRITE (1<<0)
+#define DMA_STATUS_UNALIGNED_ERR (DMA_STATUS_UNALIGNED_READ | \
+ DMA_STATUS_UNALIGNED_WRITE)
+#define DMA_TASK_CNT_1 0x40
+#define DMA_TASK_CNT_2 0x44
+#define DMA_MODE_CONFIG 0x48
+#define DMA_CURR_DESCR 0x4c
+#define DMA_PREV_DESCR 0x50
+#define DMA_SRC_ADDR_SEG 0x54
+#define DMA_DST_ADDR_SEG 0x58
+#define DMA_DESCR_ADDR_SEG 0x5c
+
+#define DMA_STATUS_ERROR (DMA_STATUS_ERR_ACC_DESCR | \
+ DMA_STATUS_ERR_ACC_DST | \
+ DMA_STATUS_ERR_ACC_SRC | \
+ DMA_STATUS_ERR_OVERFLOW | \
+ DMA_STATUS_ERR_UNDERFLOW | \
+ DMA_STATUS_UNALIGNED_ERR)
+
+#define DMA_STATUS_CLEAR (DMA_STATUS_CH_PAUS_WR_EN | \
+ DMA_STATUS_TR_COMPLETE | \
+ DMA_STATUS_BLK_COMPLETE)
+
+#define DMA_CONFIG_END (DMA_CONFIG_LAST_BLOCK | \
+ DMA_CONFIG_INT_DST_EOT)
+
+#define DMA_CONFIG_ONE_SHOT(__ext) (DMA_CONFIG_DST_SPACE((__ext)) | \
+ DMA_CONFIG_SRC_SPACE((__ext)) | \
+ DMA_CONFIG_TX_EN | \
+ DMA_CONFIG_CHAN_EN)
+
+#define DMA_CONFIG_DSC_LOAD (DMA_CONFIG_START_MEM_LOAD | \
+ DMA_CONFIG_FULL_DESCR_ADDR | \
+ DMA_CONFIG_CHAN_EN)
+
+#define GEN_STAT 0x0
+#define GEN_STAT_CH0_ACTIVE (1<<0)
+#define GEN_STAT_CH1_ACTIVE (1<<2)
+#define GEN_STAT_CH0_ERROR (1<<16)
+#define GEN_STAT_CH1_ERROR (1<<17)
+#define GEN_CONFIG 0x4
+#define GEN_CONFIG_EXT_MEM (1<<19)
+#define GEN_CONFIG_INT_EDGE(_ch) (1<<(_ch))
+#define SOFT_RESET 0x8
+
+#define GPDMA_GEN_STAT(__p) ((__p)->gbase + GEN_STAT)
+#define GPDMA_GEN_CONFIG(__p) ((__p)->gbase + GEN_CONFIG)
+#define GPDMA_SOFT_RESET(__p) ((__p)->gbase + SOFT_RESET)
+
+
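+/*
+ * In-memory image of a hardware descriptor as loaded by the controller
+ * (see gpdma_start() and DMA_CONFIG_DSC_LOAD). Fields are stored
+ * little-endian by init_descriptor(); the 32-byte alignment guarantees
+ * the low address bits that desc_to_paddr() relies on are clear.
+ */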
+struct descriptor {
+ u16 src_x_ctr;
+ u16 src_y_ctr;
+ s32 src_x_mod;
+ s32 src_y_mod;
+ u32 src_addr;
+ u32 src_data_mask;
+ u16 src_access;
+ u16 dst_access;
+ u32 ch_config;
+ u32 next_ptr;
+ u16 dst_x_ctr;
+ u16 dst_y_ctr;
+ s32 dst_x_mod;
+ s32 dst_y_mod;
+ u32 dst_addr;
+} __aligned(32);
+
+struct gpdma_engine;
+
+struct gpdma_desc {
+ struct descriptor hw;
+ struct gpdma_desc *chain;
+ dma_addr_t src;
+ dma_addr_t dst;
+ struct gpdma_engine *engine;
+ struct virt_dma_desc vdesc;
+} __aligned(32);
+
+static struct gpdma_desc *to_gpdma_desc(struct virt_dma_desc *vdesc)
+{
+ return container_of(vdesc, struct gpdma_desc, vdesc);
+}
+
+struct gpdma_channel {
+ /* Back reference to DMA engine */
+ struct gpdma_engine *engine;
+ /* Channel registers */
+ void __iomem *base;
+ /* Channel id */
+ int id;
+ /* IRQ number as passed to request_irq() */
+ int irq;
+ /* Currently running descriptor */
+ struct gpdma_desc *active;
+ /* Channel parameters (DMA engine framework) */
+ struct virt_dma_chan vc;
+};
+
+static inline struct gpdma_channel *to_gpdma_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct gpdma_channel, vc.chan);
+}
+
+struct lsidma_hw {
+ unsigned int num_channels;
+ unsigned int chregs_offset;
+ unsigned int genregs_offset;
+ unsigned int flags;
+#define LSIDMA_NEXT_FULL (1<<0)
+#define LSIDMA_SEG_REGS (1<<1)
+#define LSIDMA_EDGE_INT (1<<2)
+};
+
+struct gpdma_engine {
+ struct device *dev;
+ struct lsidma_hw *chip;
+ struct gpdma_channel channel[MAX_GPDMA_CHANNELS];
+ /** Bit mask where bit[n] == 1 if channel busy */
+ unsigned long ch_busy;
+ int err_irq;
+ void __iomem *iobase;
+ void __iomem *gbase;
+ void __iomem *gpreg;
+ spinlock_t lock;
+ struct list_head free_list;
+ struct {
+ u32 order;
+ dma_addr_t phys;
+ struct gpdma_desc *va;
+ } pool;
+ struct dma_device dma_device;
+};
+
+#define desc_to_engine(n) container_of(n, struct gpdma_engine, pool)
+
+#endif
--
1.7.9.5