dmaengine: core: introduce dma_request_chan()/dma_request_chan_by_mask(), consolidate channel lookup into find_candidate(), and synchronize channels before freeing resources (diff of drivers/dma/dmaengine.c; original page title was from an unrelated lan78xx commit)
[deliverable/linux.git] / drivers / dma / dmaengine.c
index 3ecec1445adfc9d76408fd88fb55ed5c8b79d98c..c50a247be2e0365d4cac7ccc8ce6bbb33f671354 100644 (file)
@@ -43,6 +43,7 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 #include <linux/init.h>
 #include <linux/module.h>
@@ -265,8 +266,11 @@ static void dma_chan_put(struct dma_chan *chan)
        module_put(dma_chan_to_owner(chan));
 
        /* This channel is not in use anymore, free it */
-       if (!chan->client_count && chan->device->device_free_chan_resources)
+       if (!chan->client_count && chan->device->device_free_chan_resources) {
+               /* Make sure all operations have completed */
+               dmaengine_synchronize(chan);
                chan->device->device_free_chan_resources(chan);
+       }
 
        /* If the channel is used via a DMA request router, free the mapping */
        if (chan->router && chan->router->route_free) {
@@ -493,6 +497,7 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
        caps->residue_granularity = device->residue_granularity;
+       caps->descriptor_reuse = device->descriptor_reuse;
 
        /*
         * Some devices implement only pause (e.g. to get residuum) but no
@@ -511,7 +516,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 {
        struct dma_chan *chan;
 
-       if (!__dma_device_satisfies_mask(dev, mask)) {
+       if (mask && !__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
@@ -542,6 +547,42 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
        return NULL;
 }
 
+static struct dma_chan *find_candidate(struct dma_device *device,
+                                      const dma_cap_mask_t *mask,
+                                      dma_filter_fn fn, void *fn_param)
+{
+       struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
+       int err;
+
+       if (chan) {
+               /* Found a suitable channel, try to grab, prep, and return it.
+                * We first set DMA_PRIVATE to disable balance_ref_count as this
+                * channel will not be published in the general-purpose
+                * allocator
+                */
+               dma_cap_set(DMA_PRIVATE, device->cap_mask);
+               device->privatecnt++;
+               err = dma_chan_get(chan);
+
+               if (err) {
+                       if (err == -ENODEV) {
+                               pr_debug("%s: %s module removed\n", __func__,
+                                        dma_chan_name(chan));
+                               list_del_rcu(&device->global_node);
+                       } else
+                               pr_debug("%s: failed to get %s: (%d)\n",
+                                        __func__, dma_chan_name(chan), err);
+
+                       if (--device->privatecnt == 0)
+                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+
+                       chan = ERR_PTR(err);
+               }
+       }
+
+       return chan ? chan : ERR_PTR(-EPROBE_DEFER);
+}
+
 /**
  * dma_get_slave_channel - try to get specific channel exclusively
  * @chan: target channel
@@ -580,7 +621,6 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 {
        dma_cap_mask_t mask;
        struct dma_chan *chan;
-       int err;
 
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
@@ -588,23 +628,11 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
        /* lock against __dma_request_channel */
        mutex_lock(&dma_list_mutex);
 
-       chan = private_candidate(&mask, device, NULL, NULL);
-       if (chan) {
-               dma_cap_set(DMA_PRIVATE, device->cap_mask);
-               device->privatecnt++;
-               err = dma_chan_get(chan);
-               if (err) {
-                       pr_debug("%s: failed to get %s: (%d)\n",
-                               __func__, dma_chan_name(chan), err);
-                       chan = NULL;
-                       if (--device->privatecnt == 0)
-                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-               }
-       }
+       chan = find_candidate(device, &mask, NULL, NULL);
 
        mutex_unlock(&dma_list_mutex);
 
-       return chan;
+       return IS_ERR(chan) ? NULL : chan;
 }
 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 
@@ -621,35 +649,15 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 {
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
-       int err;
 
        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
-               chan = private_candidate(mask, device, fn, fn_param);
-               if (chan) {
-                       /* Found a suitable channel, try to grab, prep, and
-                        * return it.  We first set DMA_PRIVATE to disable
-                        * balance_ref_count as this channel will not be
-                        * published in the general-purpose allocator
-                        */
-                       dma_cap_set(DMA_PRIVATE, device->cap_mask);
-                       device->privatecnt++;
-                       err = dma_chan_get(chan);
+               chan = find_candidate(device, mask, fn, fn_param);
+               if (!IS_ERR(chan))
+                       break;
 
-                       if (err == -ENODEV) {
-                               pr_debug("%s: %s module removed\n",
-                                        __func__, dma_chan_name(chan));
-                               list_del_rcu(&device->global_node);
-                       } else if (err)
-                               pr_debug("%s: failed to get %s: (%d)\n",
-                                        __func__, dma_chan_name(chan), err);
-                       else
-                               break;
-                       if (--device->privatecnt == 0)
-                               dma_cap_clear(DMA_PRIVATE, device->cap_mask);
-                       chan = NULL;
-               }
+               chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);
 
@@ -662,27 +670,73 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 }
 EXPORT_SYMBOL_GPL(__dma_request_channel);
 
+static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
+                                                   const char *name,
+                                                   struct device *dev)
+{
+       int i;
+
+       if (!device->filter.mapcnt)
+               return NULL;
+
+       for (i = 0; i < device->filter.mapcnt; i++) {
+               const struct dma_slave_map *map = &device->filter.map[i];
+
+               if (!strcmp(map->devname, dev_name(dev)) &&
+                   !strcmp(map->slave, name))
+                       return map;
+       }
+
+       return NULL;
+}
+
 /**
- * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
+ * dma_request_chan - try to allocate an exclusive slave channel
  * @dev:       pointer to client device structure
  * @name:      slave channel name
  *
  * Returns pointer to appropriate DMA channel on success or an error pointer.
  */
-struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
-                                                 const char *name)
+struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 {
+       struct dma_device *d, *_d;
+       struct dma_chan *chan = NULL;
+
        /* If device-tree is present get slave info from here */
        if (dev->of_node)
-               return of_dma_request_slave_channel(dev->of_node, name);
+               chan = of_dma_request_slave_channel(dev->of_node, name);
 
        /* If device was enumerated by ACPI get slave info from here */
-       if (ACPI_HANDLE(dev))
-               return acpi_dma_request_slave_chan_by_name(dev, name);
+       if (has_acpi_companion(dev) && !chan)
+               chan = acpi_dma_request_slave_chan_by_name(dev, name);
+
+       if (chan) {
+               /* Valid channel found or requester needs to be deferred */
+               if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
+                       return chan;
+       }
 
-       return ERR_PTR(-ENODEV);
+       /* Try to find the channel via the DMA filter map(s) */
+       mutex_lock(&dma_list_mutex);
+       list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
+               dma_cap_mask_t mask;
+               const struct dma_slave_map *map = dma_filter_match(d, name, dev);
+
+               if (!map)
+                       continue;
+
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+
+               chan = find_candidate(d, &mask, d->filter.fn, map->param);
+               if (!IS_ERR(chan))
+                       break;
+       }
+       mutex_unlock(&dma_list_mutex);
+
+       return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 }
-EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
+EXPORT_SYMBOL_GPL(dma_request_chan);
 
 /**
  * dma_request_slave_channel - try to allocate an exclusive slave channel
@@ -694,17 +748,35 @@ EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 struct dma_chan *dma_request_slave_channel(struct device *dev,
                                           const char *name)
 {
-       struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
+       struct dma_chan *ch = dma_request_chan(dev, name);
        if (IS_ERR(ch))
                return NULL;
 
-       dma_cap_set(DMA_PRIVATE, ch->device->cap_mask);
-       ch->device->privatecnt++;
-
        return ch;
 }
 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 
+/**
+ * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
+ * @mask: capabilities that the channel must satisfy
+ *
+ * Returns pointer to appropriate DMA channel on success or an error pointer.
+ */
+struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
+{
+       struct dma_chan *chan;
+
+       if (!mask)
+               return ERR_PTR(-ENODEV);
+
+       chan = __dma_request_channel(mask, NULL, NULL);
+       if (!chan)
+               chan = ERR_PTR(-ENODEV);
+
+       return chan;
+}
+EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
+
 void dma_release_channel(struct dma_chan *chan)
 {
        mutex_lock(&dma_list_mutex);
This page took 0.036308 seconds and 5 git commands to generate.