ARM: davinci/common: Convert edma driver to handle one eDMA instance per driver
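
edma_probe() now sets up exactly one channel controller, selected by
pdev->id (or by arch_num_cc when booting with DT, where pdev->id is -1).
The controller's id and struct device are stored in struct edma, and that
struct is passed to the interrupt handlers as their dev_id cookie, which
makes irq2ctlr() and the irq_res_start/irq_res_end bookkeeping
unnecessary. The following helpers are removed along with the internal
reserve_contiguous_slots(): edma_alloc_cont_slots, edma_free_cont_slots,
edma_set_src, edma_set_dest, edma_set_src_index, edma_set_dest_index,
edma_set_transfer_params, edma_unlink and edma_clear_event.

A minimal sketch of the new IRQ wiring (simplified for illustration; the
full handler bodies are in the hunks below):

	static irqreturn_t dma_irq_handler(int irq, void *data)
	{
		struct edma *cc = data;	/* controller passed at request time */

		dev_dbg(cc->dev, "dma_irq_handler\n");
		/* ... handle completion interrupts for controller cc->id ... */
		return IRQ_HANDLED;
	}

	/* in edma_probe(): the controller instance is the IRQ cookie */
	ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, cc);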
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 873dbfcc7dc9db9278e0727a20c30f53df4db1bf..7c2fe527e53bea8ab779abfee110a899fb55eb0e 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -235,6 +235,7 @@ static inline void clear_bits(int offset, int len, unsigned long *p)
 
 /* actual number of DMA channels and slots on this silicon */
 struct edma {
+       struct device   *dev;
        /* how many dma resources of each type */
        unsigned        num_channels;
        unsigned        num_region;
@@ -246,6 +247,7 @@ struct edma {
        const s8        *noevent;
 
        struct edma_soc_info *info;
+       int             id;
 
        /* The edma_inuse bit for each PaRAM slot is clear unless the
         * channel is in use ... by ARM or DSP, for QDMA, or whatever.
@@ -258,9 +260,6 @@ struct edma {
         */
        DECLARE_BITMAP(edma_unused, EDMA_MAX_DMACH);
 
-       unsigned        irq_res_start;
-       unsigned        irq_res_end;
-
        struct dma_interrupt_data {
                void (*callback)(unsigned channel, unsigned short ch_status,
                                void *data);
@@ -349,17 +348,6 @@ setup_dma_interrupt(unsigned lch,
        }
 }
 
-static int irq2ctlr(int irq)
-{
-       if (irq >= edma_cc[0]->irq_res_start && irq <= edma_cc[0]->irq_res_end)
-               return 0;
-       else if (irq >= edma_cc[1]->irq_res_start &&
-               irq <= edma_cc[1]->irq_res_end)
-               return 1;
-
-       return -1;
-}
-
 /******************************************************************************
  *
  * DMA interrupt handler
@@ -367,16 +355,17 @@ static int irq2ctlr(int irq)
  *****************************************************************************/
 static irqreturn_t dma_irq_handler(int irq, void *data)
 {
+       struct edma *cc = data;
        int ctlr;
        u32 sh_ier;
        u32 sh_ipr;
        u32 bank;
 
-       ctlr = irq2ctlr(irq);
+       ctlr = cc->id;
        if (ctlr < 0)
                return IRQ_NONE;
 
-       dev_dbg(data, "dma_irq_handler\n");
+       dev_dbg(cc->dev, "dma_irq_handler\n");
 
        sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
        if (!sh_ipr) {
@@ -394,7 +383,7 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
                u32 slot;
                u32 channel;
 
-               dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
+               dev_dbg(cc->dev, "IPR%d %08x\n", bank, sh_ipr);
 
                slot = __ffs(sh_ipr);
                sh_ipr &= ~(BIT(slot));
@@ -404,10 +393,11 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
                        /* Clear the corresponding IPR bits */
                        edma_shadow0_write_array(ctlr, SH_ICR, bank,
                                        BIT(slot));
-                       if (edma_cc[ctlr]->intr_data[channel].callback)
-                               edma_cc[ctlr]->intr_data[channel].callback(
-                                       channel, EDMA_DMA_COMPLETE,
-                                       edma_cc[ctlr]->intr_data[channel].data);
+                       if (cc->intr_data[channel].callback)
+                               cc->intr_data[channel].callback(
+                                       EDMA_CTLR_CHAN(ctlr, channel),
+                                       EDMA_DMA_COMPLETE,
+                                       cc->intr_data[channel].data);
                }
        } while (sh_ipr);
 
@@ -422,15 +412,16 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
  *****************************************************************************/
 static irqreturn_t dma_ccerr_handler(int irq, void *data)
 {
+       struct edma *cc = data;
        int i;
        int ctlr;
        unsigned int cnt = 0;
 
-       ctlr = irq2ctlr(irq);
+       ctlr = cc->id;
        if (ctlr < 0)
                return IRQ_NONE;
 
-       dev_dbg(data, "dma_ccerr_handler\n");
+       dev_dbg(cc->dev, "dma_ccerr_handler\n");
 
        if ((edma_read_array(ctlr, EDMA_EMR, 0) == 0) &&
            (edma_read_array(ctlr, EDMA_EMR, 1) == 0) &&
@@ -445,8 +436,8 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
                else if (edma_read_array(ctlr, EDMA_EMR, 1))
                        j = 1;
                if (j >= 0) {
-                       dev_dbg(data, "EMR%d %08x\n", j,
-                                       edma_read_array(ctlr, EDMA_EMR, j));
+                       dev_dbg(cc->dev, "EMR%d %08x\n", j,
+                               edma_read_array(ctlr, EDMA_EMR, j));
                        for (i = 0; i < 32; i++) {
                                int k = (j << 5) + i;
                                if (edma_read_array(ctlr, EDMA_EMR, j) &
@@ -457,18 +448,16 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
                                        /* Clear any SER */
                                        edma_shadow0_write_array(ctlr, SH_SECR,
                                                                j, BIT(i));
-                                       if (edma_cc[ctlr]->intr_data[k].
-                                                               callback) {
-                                               edma_cc[ctlr]->intr_data[k].
-                                               callback(k,
-                                               EDMA_DMA_CC_ERROR,
-                                               edma_cc[ctlr]->intr_data
-                                               [k].data);
+                                       if (cc->intr_data[k].callback) {
+                                               cc->intr_data[k].callback(
+                                                       EDMA_CTLR_CHAN(ctlr, k),
+                                                       EDMA_DMA_CC_ERROR,
+                                                       cc->intr_data[k].data);
                                        }
                                }
                        }
                } else if (edma_read(ctlr, EDMA_QEMR)) {
-                       dev_dbg(data, "QEMR %02x\n",
+                       dev_dbg(cc->dev, "QEMR %02x\n",
                                edma_read(ctlr, EDMA_QEMR));
                        for (i = 0; i < 8; i++) {
                                if (edma_read(ctlr, EDMA_QEMR) & BIT(i)) {
@@ -481,7 +470,7 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
                                }
                        }
                } else if (edma_read(ctlr, EDMA_CCERR)) {
-                       dev_dbg(data, "CCERR %08x\n",
+                       dev_dbg(cc->dev, "CCERR %08x\n",
                                edma_read(ctlr, EDMA_CCERR));
                        /* FIXME:  CCERR.BIT(16) ignored!  much better
                         * to just write CCERRCLR with CCERR value...
@@ -508,62 +497,6 @@ static irqreturn_t dma_ccerr_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static int reserve_contiguous_slots(int ctlr, unsigned int id,
-                                    unsigned int num_slots,
-                                    unsigned int start_slot)
-{
-       int i, j;
-       unsigned int count = num_slots;
-       int stop_slot = start_slot;
-       DECLARE_BITMAP(tmp_inuse, EDMA_MAX_PARAMENTRY);
-
-       for (i = start_slot; i < edma_cc[ctlr]->num_slots; ++i) {
-               j = EDMA_CHAN_SLOT(i);
-               if (!test_and_set_bit(j, edma_cc[ctlr]->edma_inuse)) {
-                       /* Record our current beginning slot */
-                       if (count == num_slots)
-                               stop_slot = i;
-
-                       count--;
-                       set_bit(j, tmp_inuse);
-
-                       if (count == 0)
-                               break;
-               } else {
-                       clear_bit(j, tmp_inuse);
-
-                       if (id == EDMA_CONT_PARAMS_FIXED_EXACT) {
-                               stop_slot = i;
-                               break;
-                       } else {
-                               count = num_slots;
-                       }
-               }
-       }
-
-       /*
-        * We have to clear any bits that we set
-        * if we run out parameter RAM slots, i.e we do find a set
-        * of contiguous parameter RAM slots but do not find the exact number
-        * requested as we may reach the total number of parameter RAM slots
-        */
-       if (i == edma_cc[ctlr]->num_slots)
-               stop_slot = i;
-
-       j = start_slot;
-       for_each_set_bit_from(j, tmp_inuse, stop_slot)
-               clear_bit(j, edma_cc[ctlr]->edma_inuse);
-
-       if (count)
-               return -EBUSY;
-
-       for (j = i - num_slots + 1; j <= i; ++j)
-               memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(j),
-                       &dummy_paramset, PARM_SIZE);
-
-       return EDMA_CTLR_CHAN(ctlr, i - num_slots + 1);
-}
-
 static int prepare_unused_channel_list(struct device *dev, void *data)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -816,185 +749,10 @@ void edma_free_slot(unsigned slot)
 }
 EXPORT_SYMBOL(edma_free_slot);
 
-
-/**
- * edma_alloc_cont_slots- alloc contiguous parameter RAM slots
- * The API will return the starting point of a set of
- * contiguous parameter RAM slots that have been requested
- *
- * @id: can only be EDMA_CONT_PARAMS_ANY or EDMA_CONT_PARAMS_FIXED_EXACT
- * or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
- * @count: number of contiguous Paramter RAM slots
- * @slot  - the start value of Parameter RAM slot that should be passed if id
- * is EDMA_CONT_PARAMS_FIXED_EXACT or EDMA_CONT_PARAMS_FIXED_NOT_EXACT
- *
- * If id is EDMA_CONT_PARAMS_ANY then the API starts looking for a set of
- * contiguous Parameter RAM slots from parameter RAM 64 in the case of
- * DaVinci SOCs and 32 in the case of DA8xx SOCs.
- *
- * If id is EDMA_CONT_PARAMS_FIXED_EXACT then the API starts looking for a
- * set of contiguous parameter RAM slots from the "slot" that is passed as an
- * argument to the API.
- *
- * If id is EDMA_CONT_PARAMS_FIXED_NOT_EXACT then the API initially tries
- * starts looking for a set of contiguous parameter RAMs from the "slot"
- * that is passed as an argument to the API. On failure the API will try to
- * find a set of contiguous Parameter RAM slots from the remaining Parameter
- * RAM slots
- */
-int edma_alloc_cont_slots(unsigned ctlr, unsigned int id, int slot, int count)
-{
-       /*
-        * The start slot requested should be greater than
-        * the number of channels and lesser than the total number
-        * of slots
-        */
-       if ((id != EDMA_CONT_PARAMS_ANY) &&
-               (slot < edma_cc[ctlr]->num_channels ||
-               slot >= edma_cc[ctlr]->num_slots))
-               return -EINVAL;
-
-       /*
-        * The number of parameter RAM slots requested cannot be less than 1
-        * and cannot be more than the number of slots minus the number of
-        * channels
-        */
-       if (count < 1 || count >
-               (edma_cc[ctlr]->num_slots - edma_cc[ctlr]->num_channels))
-               return -EINVAL;
-
-       switch (id) {
-       case EDMA_CONT_PARAMS_ANY:
-               return reserve_contiguous_slots(ctlr, id, count,
-                                                edma_cc[ctlr]->num_channels);
-       case EDMA_CONT_PARAMS_FIXED_EXACT:
-       case EDMA_CONT_PARAMS_FIXED_NOT_EXACT:
-               return reserve_contiguous_slots(ctlr, id, count, slot);
-       default:
-               return -EINVAL;
-       }
-
-}
-EXPORT_SYMBOL(edma_alloc_cont_slots);
-
-/**
- * edma_free_cont_slots - deallocate DMA parameter RAM slots
- * @slot: first parameter RAM of a set of parameter RAM slots to be freed
- * @count: the number of contiguous parameter RAM slots to be freed
- *
- * This deallocates the parameter RAM slots allocated by
- * edma_alloc_cont_slots.
- * Callers/applications need to keep track of sets of contiguous
- * parameter RAM slots that have been allocated using the edma_alloc_cont_slots
- * API.
- * Callers are responsible for ensuring the slots are inactive, and will
- * not be activated.
- */
-int edma_free_cont_slots(unsigned slot, int count)
-{
-       unsigned ctlr, slot_to_free;
-       int i;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_channels ||
-               slot >= edma_cc[ctlr]->num_slots ||
-               count < 1)
-               return -EINVAL;
-
-       for (i = slot; i < slot + count; ++i) {
-               ctlr = EDMA_CTLR(i);
-               slot_to_free = EDMA_CHAN_SLOT(i);
-
-               memcpy_toio(edmacc_regs_base[ctlr] + PARM_OFFSET(slot_to_free),
-                       &dummy_paramset, PARM_SIZE);
-               clear_bit(slot_to_free, edma_cc[ctlr]->edma_inuse);
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(edma_free_cont_slots);
-
 /*-----------------------------------------------------------------------*/
 
 /* Parameter RAM operations (i) -- read/write partial slots */
 
-/**
- * edma_set_src - set initial DMA source address in parameter RAM slot
- * @slot: parameter RAM slot being configured
- * @src_port: physical address of source (memory, controller FIFO, etc)
- * @addressMode: INCR, except in very rare cases
- * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
- *     width to use when addressing the fifo (e.g. W8BIT, W32BIT)
- *
- * Note that the source address is modified during the DMA transfer
- * according to edma_set_src_index().
- */
-void edma_set_src(unsigned slot, dma_addr_t src_port,
-                               enum address_mode mode, enum fifo_width width)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_slots) {
-               unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
-
-               if (mode) {
-                       /* set SAM and program FWID */
-                       i = (i & ~(EDMA_FWID)) | (SAM | ((width & 0x7) << 8));
-               } else {
-                       /* clear SAM */
-                       i &= ~SAM;
-               }
-               edma_parm_write(ctlr, PARM_OPT, slot, i);
-
-               /* set the source port address
-                  in source register of param structure */
-               edma_parm_write(ctlr, PARM_SRC, slot, src_port);
-       }
-}
-EXPORT_SYMBOL(edma_set_src);
-
-/**
- * edma_set_dest - set initial DMA destination address in parameter RAM slot
- * @slot: parameter RAM slot being configured
- * @dest_port: physical address of destination (memory, controller FIFO, etc)
- * @addressMode: INCR, except in very rare cases
- * @fifoWidth: ignored unless @addressMode is FIFO, else specifies the
- *     width to use when addressing the fifo (e.g. W8BIT, W32BIT)
- *
- * Note that the destination address is modified during the DMA transfer
- * according to edma_set_dest_index().
- */
-void edma_set_dest(unsigned slot, dma_addr_t dest_port,
-                                enum address_mode mode, enum fifo_width width)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_slots) {
-               unsigned int i = edma_parm_read(ctlr, PARM_OPT, slot);
-
-               if (mode) {
-                       /* set DAM and program FWID */
-                       i = (i & ~(EDMA_FWID)) | (DAM | ((width & 0x7) << 8));
-               } else {
-                       /* clear DAM */
-                       i &= ~DAM;
-               }
-               edma_parm_write(ctlr, PARM_OPT, slot, i);
-               /* set the destination port address
-                  in dest register of param structure */
-               edma_parm_write(ctlr, PARM_DST, slot, dest_port);
-       }
-}
-EXPORT_SYMBOL(edma_set_dest);
-
 /**
  * edma_get_position - returns the current transfer point
  * @slot: parameter RAM slot being examined
@@ -1014,110 +772,6 @@ dma_addr_t edma_get_position(unsigned slot, bool dst)
        return edma_read(ctlr, offs);
 }
 
-/**
- * edma_set_src_index - configure DMA source address indexing
- * @slot: parameter RAM slot being configured
- * @src_bidx: byte offset between source arrays in a frame
- * @src_cidx: byte offset between source frames in a block
- *
- * Offsets are specified to support either contiguous or discontiguous
- * memory transfers, or repeated access to a hardware register, as needed.
- * When accessing hardware registers, both offsets are normally zero.
- */
-void edma_set_src_index(unsigned slot, s16 src_bidx, s16 src_cidx)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_slots) {
-               edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
-                               0xffff0000, src_bidx);
-               edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
-                               0xffff0000, src_cidx);
-       }
-}
-EXPORT_SYMBOL(edma_set_src_index);
-
-/**
- * edma_set_dest_index - configure DMA destination address indexing
- * @slot: parameter RAM slot being configured
- * @dest_bidx: byte offset between destination arrays in a frame
- * @dest_cidx: byte offset between destination frames in a block
- *
- * Offsets are specified to support either contiguous or discontiguous
- * memory transfers, or repeated access to a hardware register, as needed.
- * When accessing hardware registers, both offsets are normally zero.
- */
-void edma_set_dest_index(unsigned slot, s16 dest_bidx, s16 dest_cidx)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_slots) {
-               edma_parm_modify(ctlr, PARM_SRC_DST_BIDX, slot,
-                               0x0000ffff, dest_bidx << 16);
-               edma_parm_modify(ctlr, PARM_SRC_DST_CIDX, slot,
-                               0x0000ffff, dest_cidx << 16);
-       }
-}
-EXPORT_SYMBOL(edma_set_dest_index);
-
-/**
- * edma_set_transfer_params - configure DMA transfer parameters
- * @slot: parameter RAM slot being configured
- * @acnt: how many bytes per array (at least one)
- * @bcnt: how many arrays per frame (at least one)
- * @ccnt: how many frames per block (at least one)
- * @bcnt_rld: used only for A-Synchronized transfers; this specifies
- *     the value to reload into bcnt when it decrements to zero
- * @sync_mode: ASYNC or ABSYNC
- *
- * See the EDMA3 documentation to understand how to configure and link
- * transfers using the fields in PaRAM slots.  If you are not doing it
- * all at once with edma_write_slot(), you will use this routine
- * plus two calls each for source and destination, setting the initial
- * address and saying how to index that address.
- *
- * An example of an A-Synchronized transfer is a serial link using a
- * single word shift register.  In that case, @acnt would be equal to
- * that word size; the serial controller issues a DMA synchronization
- * event to transfer each word, and memory access by the DMA transfer
- * controller will be word-at-a-time.
- *
- * An example of an AB-Synchronized transfer is a device using a FIFO.
- * In that case, @acnt equals the FIFO width and @bcnt equals its depth.
- * The controller with the FIFO issues DMA synchronization events when
- * the FIFO threshold is reached, and the DMA transfer controller will
- * transfer one frame to (or from) the FIFO.  It will probably use
- * efficient burst modes to access memory.
- */
-void edma_set_transfer_params(unsigned slot,
-               u16 acnt, u16 bcnt, u16 ccnt,
-               u16 bcnt_rld, enum sync_dimension sync_mode)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(slot);
-       slot = EDMA_CHAN_SLOT(slot);
-
-       if (slot < edma_cc[ctlr]->num_slots) {
-               edma_parm_modify(ctlr, PARM_LINK_BCNTRLD, slot,
-                               0x0000ffff, bcnt_rld << 16);
-               if (sync_mode == ASYNC)
-                       edma_parm_and(ctlr, PARM_OPT, slot, ~SYNCDIM);
-               else
-                       edma_parm_or(ctlr, PARM_OPT, slot, SYNCDIM);
-               /* Set the acount, bcount, ccount registers */
-               edma_parm_write(ctlr, PARM_A_B_CNT, slot, (bcnt << 16) | acnt);
-               edma_parm_write(ctlr, PARM_CCNT, slot, ccnt);
-       }
-}
-EXPORT_SYMBOL(edma_set_transfer_params);
-
 /**
  * edma_link - link one parameter RAM slot to another
  * @from: parameter RAM slot originating the link
@@ -1143,26 +797,6 @@ void edma_link(unsigned from, unsigned to)
 }
 EXPORT_SYMBOL(edma_link);
 
-/**
- * edma_unlink - cut link from one parameter RAM slot
- * @from: parameter RAM slot originating the link
- *
- * The originating slot should not be part of any active DMA transfer.
- * Its link is set to 0xffff.
- */
-void edma_unlink(unsigned from)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(from);
-       from = EDMA_CHAN_SLOT(from);
-
-       if (from >= edma_cc[ctlr]->num_slots)
-               return;
-       edma_parm_or(ctlr, PARM_LINK_BCNTRLD, from, 0xffff);
-}
-EXPORT_SYMBOL(edma_unlink);
-
 /*-----------------------------------------------------------------------*/
 
 /* Parameter RAM operations (ii) -- read/write whole parameter sets */
@@ -1399,27 +1033,6 @@ void edma_clean_channel(unsigned channel)
 }
 EXPORT_SYMBOL(edma_clean_channel);
 
-/*
- * edma_clear_event - clear an outstanding event on the DMA channel
- * Arguments:
- *     channel - channel number
- */
-void edma_clear_event(unsigned channel)
-{
-       unsigned ctlr;
-
-       ctlr = EDMA_CTLR(channel);
-       channel = EDMA_CHAN_SLOT(channel);
-
-       if (channel >= edma_cc[ctlr]->num_channels)
-               return;
-       if (channel < 32)
-               edma_write(ctlr, EDMA_ECR, BIT(channel));
-       else
-               edma_write(ctlr, EDMA_ECRH, BIT(channel - 32));
-}
-EXPORT_SYMBOL(edma_clear_event);
-
 /*
  * edma_assign_channel_eventq - move given channel to desired eventq
  * Arguments:
@@ -1613,21 +1226,19 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 
 static int edma_probe(struct platform_device *pdev)
 {
-       struct edma_soc_info    **info = pdev->dev.platform_data;
-       struct edma_soc_info    *ninfo[EDMA_MAX_CC] = {NULL};
+       struct edma_soc_info    *info = pdev->dev.platform_data;
        s8              (*queue_priority_mapping)[2];
-       int                     i, j, off, ln, found = 0;
-       int                     status = -1;
+       int                     i, off, ln;
        const s16               (*rsv_chans)[2];
        const s16               (*rsv_slots)[2];
        const s16               (*xbar_chans)[2];
-       int                     irq[EDMA_MAX_CC] = {0, 0};
-       int                     err_irq[EDMA_MAX_CC] = {0, 0};
-       struct resource         *r[EDMA_MAX_CC] = {NULL};
-       struct resource         res[EDMA_MAX_CC];
-       char                    res_name[10];
+       int                     irq;
+       char                    *irq_name;
+       struct resource         *mem;
        struct device_node      *node = pdev->dev.of_node;
        struct device           *dev = &pdev->dev;
+       int                     dev_id = pdev->id;
+       struct edma             *cc;
        int                     ret;
        struct platform_device_info edma_dev_info = {
                .name = "edma-dma-engine",
@@ -1635,6 +1246,17 @@ static int edma_probe(struct platform_device *pdev)
                .parent = &pdev->dev,
        };
 
+       /* When booting with DT the pdev->id is -1 */
+       if (dev_id < 0)
+               dev_id = arch_num_cc;
+
+       if (dev_id >= EDMA_MAX_CC) {
+               dev_err(dev,
+                       "eDMA3 with device id 0 and 1 is supported (id: %d)\n",
+                       dev_id);
+               return -EINVAL;
+       }
+
        if (node) {
                /* Check if this is a second instance registered */
                if (arch_num_cc) {
@@ -1642,13 +1264,11 @@ static int edma_probe(struct platform_device *pdev)
                        return -ENODEV;
                }
 
-               ninfo[0] = edma_setup_info_from_dt(dev, node);
-               if (IS_ERR(ninfo[0])) {
+               info = edma_setup_info_from_dt(dev, node);
+               if (IS_ERR(info)) {
                        dev_err(dev, "failed to get DT data\n");
-                       return PTR_ERR(ninfo[0]);
+                       return PTR_ERR(info);
                }
-
-               info = ninfo;
        }
 
        if (!info)
@@ -1661,154 +1281,132 @@ static int edma_probe(struct platform_device *pdev)
                return ret;
        }
 
-       for (j = 0; j < EDMA_MAX_CC; j++) {
-               if (!info[j]) {
-                       if (!found)
-                               return -ENODEV;
-                       break;
-               }
-               if (node) {
-                       ret = of_address_to_resource(node, j, &res[j]);
-                       if (!ret)
-                               r[j] = &res[j];
-               } else {
-                       sprintf(res_name, "edma_cc%d", j);
-                       r[j] = platform_get_resource_byname(pdev,
-                                               IORESOURCE_MEM,
-                                               res_name);
-               }
-               if (!r[j]) {
-                       if (found)
-                               break;
-                       else
-                               return -ENODEV;
-               } else {
-                       found = 1;
+       mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
+       if (!mem) {
+               dev_dbg(dev, "mem resource not found, using index 0\n");
+               mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+               if (!mem) {
+                       dev_err(dev, "no mem resource?\n");
+                       return -ENODEV;
                }
+       }
 
-               edmacc_regs_base[j] = devm_ioremap_resource(&pdev->dev, r[j]);
-               if (IS_ERR(edmacc_regs_base[j]))
-                       return PTR_ERR(edmacc_regs_base[j]);
+       edmacc_regs_base[dev_id] = devm_ioremap_resource(dev, mem);
+       if (IS_ERR(edmacc_regs_base[dev_id]))
+               return PTR_ERR(edmacc_regs_base[dev_id]);
 
-               edma_cc[j] = devm_kzalloc(&pdev->dev, sizeof(struct edma),
-                                         GFP_KERNEL);
-               if (!edma_cc[j])
-                       return -ENOMEM;
+       edma_cc[dev_id] = devm_kzalloc(dev, sizeof(struct edma), GFP_KERNEL);
+       if (!edma_cc[dev_id])
+               return -ENOMEM;
 
-               /* Get eDMA3 configuration from IP */
-               ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j);
-               if (ret)
-                       return ret;
+       cc = edma_cc[dev_id];
+       cc->dev = dev;
+       cc->id = dev_id;
+       dev_set_drvdata(dev, cc);
 
-               edma_cc[j]->default_queue = info[j]->default_queue;
+       /* Get eDMA3 configuration from IP */
+       ret = edma_setup_from_hw(dev, info, cc, dev_id);
+       if (ret)
+               return ret;
 
-               dev_dbg(&pdev->dev, "DMA REG BASE ADDR=%p\n",
-                       edmacc_regs_base[j]);
+       cc->default_queue = info->default_queue;
 
-               for (i = 0; i < edma_cc[j]->num_slots; i++)
-                       memcpy_toio(edmacc_regs_base[j] + PARM_OFFSET(i),
-                                       &dummy_paramset, PARM_SIZE);
+       dev_dbg(dev, "DMA REG BASE ADDR=%p\n", edmacc_regs_base[dev_id]);
 
-               /* Mark all channels as unused */
-               memset(edma_cc[j]->edma_unused, 0xff,
-                       sizeof(edma_cc[j]->edma_unused));
+       for (i = 0; i < cc->num_slots; i++)
+               memcpy_toio(edmacc_regs_base[dev_id] + PARM_OFFSET(i),
+                           &dummy_paramset, PARM_SIZE);
 
-               if (info[j]->rsv) {
+       /* Mark all channels as unused */
+       memset(cc->edma_unused, 0xff, sizeof(cc->edma_unused));
 
-                       /* Clear the reserved channels in unused list */
-                       rsv_chans = info[j]->rsv->rsv_chans;
-                       if (rsv_chans) {
-                               for (i = 0; rsv_chans[i][0] != -1; i++) {
-                                       off = rsv_chans[i][0];
-                                       ln = rsv_chans[i][1];
-                                       clear_bits(off, ln,
-                                                 edma_cc[j]->edma_unused);
-                               }
-                       }
+       if (info->rsv) {
 
-                       /* Set the reserved slots in inuse list */
-                       rsv_slots = info[j]->rsv->rsv_slots;
-                       if (rsv_slots) {
-                               for (i = 0; rsv_slots[i][0] != -1; i++) {
-                                       off = rsv_slots[i][0];
-                                       ln = rsv_slots[i][1];
-                                       set_bits(off, ln,
-                                               edma_cc[j]->edma_inuse);
-                               }
+               /* Clear the reserved channels in unused list */
+               rsv_chans = info->rsv->rsv_chans;
+               if (rsv_chans) {
+                       for (i = 0; rsv_chans[i][0] != -1; i++) {
+                               off = rsv_chans[i][0];
+                               ln = rsv_chans[i][1];
+                               clear_bits(off, ln, cc->edma_unused);
                        }
                }
 
-               /* Clear the xbar mapped channels in unused list */
-               xbar_chans = info[j]->xbar_chans;
-               if (xbar_chans) {
-                       for (i = 0; xbar_chans[i][1] != -1; i++) {
-                               off = xbar_chans[i][1];
-                               clear_bits(off, 1,
-                                          edma_cc[j]->edma_unused);
+               /* Set the reserved slots in inuse list */
+               rsv_slots = info->rsv->rsv_slots;
+               if (rsv_slots) {
+                       for (i = 0; rsv_slots[i][0] != -1; i++) {
+                               off = rsv_slots[i][0];
+                               ln = rsv_slots[i][1];
+                               set_bits(off, ln, cc->edma_inuse);
                        }
                }
+       }
 
-               if (node) {
-                       irq[j] = irq_of_parse_and_map(node, 0);
-                       err_irq[j] = irq_of_parse_and_map(node, 2);
-               } else {
-                       char irq_name[10];
-
-                       sprintf(irq_name, "edma%d", j);
-                       irq[j] = platform_get_irq_byname(pdev, irq_name);
-
-                       sprintf(irq_name, "edma%d_err", j);
-                       err_irq[j] = platform_get_irq_byname(pdev, irq_name);
-               }
-               edma_cc[j]->irq_res_start = irq[j];
-               edma_cc[j]->irq_res_end = err_irq[j];
-
-               status = devm_request_irq(dev, irq[j], dma_irq_handler, 0,
-                                         "edma", dev);
-               if (status < 0) {
-                       dev_dbg(&pdev->dev,
-                               "devm_request_irq %d failed --> %d\n",
-                               irq[j], status);
-                       return status;
+       /* Clear the xbar mapped channels in unused list */
+       xbar_chans = info->xbar_chans;
+       if (xbar_chans) {
+               for (i = 0; xbar_chans[i][1] != -1; i++) {
+                       off = xbar_chans[i][1];
+                       clear_bits(off, 1, cc->edma_unused);
                }
+       }
 
-               status = devm_request_irq(dev, err_irq[j], dma_ccerr_handler, 0,
-                                         "edma_error", dev);
-               if (status < 0) {
-                       dev_dbg(&pdev->dev,
-                               "devm_request_irq %d failed --> %d\n",
-                               err_irq[j], status);
-                       return status;
+       irq = platform_get_irq_byname(pdev, "edma3_ccint");
+       if (irq < 0 && node)
+               irq = irq_of_parse_and_map(node, 0);
+
+       if (irq >= 0) {
+               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
+                                         dev_name(dev));
+               ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
+                                      cc);
+               if (ret) {
+                       dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
+                       return ret;
                }
+       }
 
-               for (i = 0; i < edma_cc[j]->num_channels; i++)
-                       map_dmach_queue(j, i, info[j]->default_queue);
+       irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
+       if (irq < 0 && node)
+               irq = irq_of_parse_and_map(node, 2);
+
+       if (irq >= 0) {
+               irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
+                                         dev_name(dev));
+               ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
+                                      cc);
+               if (ret) {
+                       dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
+                       return ret;
+               }
+       }
 
-               queue_priority_mapping = info[j]->queue_priority_mapping;
+       for (i = 0; i < cc->num_channels; i++)
+               map_dmach_queue(dev_id, i, info->default_queue);
 
-               /* Event queue priority mapping */
-               for (i = 0; queue_priority_mapping[i][0] != -1; i++)
-                       assign_priority_to_queue(j,
-                                               queue_priority_mapping[i][0],
-                                               queue_priority_mapping[i][1]);
+       queue_priority_mapping = info->queue_priority_mapping;
 
-               /* Map the channel to param entry if channel mapping logic
-                * exist
-                */
-               if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
-                       map_dmach_param(j);
+       /* Event queue priority mapping */
+       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+               assign_priority_to_queue(dev_id, queue_priority_mapping[i][0],
+                                        queue_priority_mapping[i][1]);
 
-               for (i = 0; i < edma_cc[j]->num_region; i++) {
-                       edma_write_array2(j, EDMA_DRAE, i, 0, 0x0);
-                       edma_write_array2(j, EDMA_DRAE, i, 1, 0x0);
-                       edma_write_array(j, EDMA_QRAE, i, 0x0);
-               }
-               edma_cc[j]->info = info[j];
-               arch_num_cc++;
+       /* Map the channel to param entry if channel mapping logic exist */
+       if (edma_read(dev_id, EDMA_CCCFG) & CHMAP_EXIST)
+               map_dmach_param(dev_id);
 
-               edma_dev_info.id = j;
-               platform_device_register_full(&edma_dev_info);
+       for (i = 0; i < cc->num_region; i++) {
+               edma_write_array2(dev_id, EDMA_DRAE, i, 0, 0x0);
+               edma_write_array2(dev_id, EDMA_DRAE, i, 1, 0x0);
+               edma_write_array(dev_id, EDMA_QRAE, i, 0x0);
        }
+       cc->info = info;
+       arch_num_cc++;
+
+       edma_dev_info.id = dev_id;
+
+       platform_device_register_full(&edma_dev_info);
 
        return 0;
 }
@@ -1816,38 +1414,30 @@ static int edma_probe(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int edma_pm_resume(struct device *dev)
 {
-       int i, j;
+       struct edma *cc = dev_get_drvdata(dev);
+       int i;
+       s8 (*queue_priority_mapping)[2];
 
-       for (j = 0; j < arch_num_cc; j++) {
-               struct edma *cc = edma_cc[j];
+       queue_priority_mapping = cc->info->queue_priority_mapping;
 
-               s8 (*queue_priority_mapping)[2];
+       /* Event queue priority mapping */
+       for (i = 0; queue_priority_mapping[i][0] != -1; i++)
+               assign_priority_to_queue(cc->id, queue_priority_mapping[i][0],
+                                        queue_priority_mapping[i][1]);
 
-               queue_priority_mapping = cc->info->queue_priority_mapping;
+       /* Map the channel to param entry if channel mapping logic */
+       if (edma_read(cc->id, EDMA_CCCFG) & CHMAP_EXIST)
+               map_dmach_param(cc->id);
 
-               /* Event queue priority mapping */
-               for (i = 0; queue_priority_mapping[i][0] != -1; i++)
-                       assign_priority_to_queue(j,
-                                                queue_priority_mapping[i][0],
-                                                queue_priority_mapping[i][1]);
+       for (i = 0; i < cc->num_channels; i++) {
+               if (test_bit(i, cc->edma_inuse)) {
+                       /* ensure access through shadow region 0 */
+                       edma_or_array2(cc->id, EDMA_DRAE, 0, i >> 5,
+                                      BIT(i & 0x1f));
 
-               /*
-                * Map the channel to param entry if channel mapping logic
-                * exist
-                */
-               if (edma_read(j, EDMA_CCCFG) & CHMAP_EXIST)
-                       map_dmach_param(j);
-
-               for (i = 0; i < cc->num_channels; i++) {
-                       if (test_bit(i, cc->edma_inuse)) {
-                               /* ensure access through shadow region 0 */
-                               edma_or_array2(j, EDMA_DRAE, 0, i >> 5,
-                                              BIT(i & 0x1f));
-
-                               setup_dma_interrupt(i,
-                                                   cc->intr_data[i].callback,
-                                                   cc->intr_data[i].data);
-                       }
+                       setup_dma_interrupt(EDMA_CTLR_CHAN(cc->id, i),
+                                           cc->intr_data[i].callback,
+                                           cc->intr_data[i].data);
                }
        }
 