@@ -305,7 +305,7 @@ static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
static void axi_adc_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
@@ -206,39 +206,29 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
/**
* iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
- * @dev: DMA channel consumer device
- * @channel: DMA channel name, typically "rx".
+ * @chan: DMA channel.
*
* This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
+ * to perform its transfers.
*
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel)
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
struct dmaengine_buffer *dmaengine_buffer;
unsigned int width, src_width, dest_width;
struct dma_slave_caps caps;
- struct dma_chan *chan;
int ret;
+ ret = dma_get_slave_caps(chan, &caps);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
if (!dmaengine_buffer)
return ERR_PTR(-ENOMEM);
- chan = dma_request_chan(dev, channel);
- if (IS_ERR(chan)) {
- ret = PTR_ERR(chan);
- goto err_free;
- }
-
- ret = dma_get_slave_caps(chan, &caps);
- if (ret < 0)
- goto err_release;
-
/* Needs to be aligned to the maximum of the minimums */
if (caps.src_addr_widths)
src_width = __ffs(caps.src_addr_widths);
@@ -262,12 +252,6 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;
return &dmaengine_buffer->queue.buffer;
-
-err_release:
- dma_release_channel(chan);
-err_free:
- kfree(dmaengine_buffer);
- return ERR_PTR(ret);
}
/**
@@ -276,17 +260,57 @@ static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
*
* Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
*/
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
+static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(buffer);
iio_dma_buffer_exit(&dmaengine_buffer->queue);
- dma_release_channel(dmaengine_buffer->chan);
-
iio_buffer_put(buffer);
}
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
+
+/**
+ * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
+ * @buffer: Buffer to free
+ *
+ * Releases the DMA channel and frees the buffer previously setup with
+ * iio_dmaengine_buffer_setup_ext().
+ */
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
+{
+ struct dmaengine_buffer *dmaengine_buffer =
+ iio_buffer_to_dmaengine_buffer(buffer);
+ struct dma_chan *chan = dmaengine_buffer->chan;
+
+ iio_dmaengine_buffer_free(buffer);
+ dma_release_channel(chan);
+}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");
+
+static struct iio_buffer
+*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
+ struct dma_chan *chan,
+ enum iio_buffer_direction dir)
+{
+ struct iio_buffer *buffer;
+ int ret;
+
+ buffer = iio_dmaengine_buffer_alloc(chan);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ buffer->direction = dir;
+
+ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret) {
+ iio_dmaengine_buffer_free(buffer);
+ return ERR_PTR(ret);
+ }
+
+ return buffer;
+}
/**
* iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
@@ -300,7 +324,7 @@ EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, "IIO_DMAENGINE_BUFFER");
* It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
* IIO device.
*
- * Once done using the buffer iio_dmaengine_buffer_free() should be used to
+ * Once done using the buffer iio_dmaengine_buffer_teardown() should be used to
* release it.
*/
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
@@ -308,30 +332,24 @@ struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
const char *channel,
enum iio_buffer_direction dir)
{
+ struct dma_chan *chan;
struct iio_buffer *buffer;
- int ret;
-
- buffer = iio_dmaengine_buffer_alloc(dev, channel);
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
- buffer->direction = dir;
+ chan = dma_request_chan(dev, channel);
+ if (IS_ERR(chan))
+ return ERR_CAST(chan);
- ret = iio_device_attach_buffer(indio_dev, buffer);
- if (ret) {
- iio_dmaengine_buffer_free(buffer);
- return ERR_PTR(ret);
- }
+ buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
+ if (IS_ERR(buffer))
+ dma_release_channel(chan);
return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
+static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
/**
@@ -357,7 +375,7 @@ int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
@@ -168,7 +168,7 @@ static struct iio_buffer *axi_dac_request_buffer(struct iio_backend *back,
static void axi_dac_free_buffer(struct iio_backend *back,
struct iio_buffer *buffer)
{
- iio_dmaengine_buffer_free(buffer);
+ iio_dmaengine_buffer_teardown(buffer);
}
enum {
@@ -12,7 +12,7 @@
struct iio_dev;
struct device;
-void iio_dmaengine_buffer_free(struct iio_buffer *buffer);
+void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer);
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
struct iio_dev *indio_dev,
const char *channel,
Refactor the IIO dmaengine buffer code to split requesting the DMA channel
from allocating the buffer. We want to be able to add a new function where
the IIO device driver manages the DMA channel, so these two actions need to
be separate.

To do this, calling dma_request_chan() is moved from
iio_dmaengine_buffer_alloc() to iio_dmaengine_buffer_setup_ext(). A new
__iio_dmaengine_buffer_setup_ext() helper function is added to simplify
error unwinding and will also be used by a new function in a later patch.

iio_dmaengine_buffer_free() now only frees the buffer and does not release
the DMA channel. A new iio_dmaengine_buffer_teardown() function is added to
unwind everything done in iio_dmaengine_buffer_setup_ext(). This keeps
things more symmetrical with obvious pairs alloc/free and setup/teardown.

Calling dma_get_slave_caps() in iio_dmaengine_buffer_alloc() is moved so
that we can avoid any gotos for error unwinding.

Signed-off-by: David Lechner <dlechner@baylibre.com>
---
v6 changes:
* Split out from patch that adds the new function
* Dropped owns_chan flag
* Introduced iio_dmaengine_buffer_teardown() so that
  iio_dmaengine_buffer_free() doesn't have to manage the DMA channel
---
 drivers/iio/adc/adi-axi-adc.c                       |   2 +-
 drivers/iio/buffer/industrialio-buffer-dmaengine.c  | 106 ++++++++++++---------
 drivers/iio/dac/adi-axi-dac.c                       |   2 +-
 include/linux/iio/buffer-dmaengine.h                |   2 +-
 4 files changed, 65 insertions(+), 47 deletions(-)
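
For illustration, here is a minimal sketch of how a converter driver might
pair the new calls after this change. The my_adc_* function names and the
"rx" channel name are hypothetical and not part of this patch:

    #include <linux/err.h>
    #include <linux/iio/buffer-dmaengine.h>
    #include <linux/iio/iio.h>

    static int my_adc_setup_buffer(struct device *dev,
                                   struct iio_dev *indio_dev,
                                   struct iio_buffer **buffer)
    {
            /*
             * Requests the "rx" DMA channel, allocates the DMAengine
             * buffer and attaches it to the IIO device.
             */
            *buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
                                                     IIO_BUFFER_DIRECTION_IN);
            if (IS_ERR(*buffer))
                    return PTR_ERR(*buffer);

            return 0;
    }

    static void my_adc_free_buffer(struct iio_buffer *buffer)
    {
            /* Frees the buffer and releases the channel requested above. */
            iio_dmaengine_buffer_teardown(buffer);
    }

Most drivers would use devm_iio_dmaengine_buffer_setup_ext() instead, which
registers the same teardown as a device-managed action.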