@@ -28,6 +28,8 @@
#define CAPTURE_MIN_PERIOD_SIZE 320
#define BUFFER_BYTES_MAX (PLAYBACK_MAX_NUM_PERIODS * PLAYBACK_MAX_PERIOD_SIZE)
#define BUFFER_BYTES_MIN (PLAYBACK_MIN_NUM_PERIODS * PLAYBACK_MIN_PERIOD_SIZE)
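+/* Compressed playback fragment limits (DMA buffer = max fragment size * max fragments) */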
+#define COMPR_PLAYBACK_MAX_FRAGMENT_SIZE (128 * 1024)
+#define COMPR_PLAYBACK_MAX_NUM_FRAGMENTS (16 * 4)
#define SID_MASK_DEFAULT 0xF
enum stream_state {
@@ -55,6 +57,7 @@ struct q6apm_dai_rtd {
enum stream_state state;
struct q6apm_graph *graph;
spinlock_t lock;
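+	/* when set, EOS_DONE completes the pending drain via snd_compr_drain_notify() */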
+ bool notify_on_drain;
};
struct q6apm_dai_data {
@@ -132,6 +135,69 @@ static void event_handler(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv)
}
}
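+/* DSP event callback for compressed streams (EOS done, data write done) */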
+static void event_handler_compr(uint32_t opcode, uint32_t token,
+ uint32_t *payload, void *priv)
+{
+ struct q6apm_dai_rtd *prtd = priv;
+ struct snd_compr_stream *substream = prtd->cstream;
+ unsigned long flags;
+ uint32_t wflags = 0;
+ uint64_t avail;
+ uint32_t bytes_written, bytes_to_write;
+ bool is_last_buffer = false;
+
+ switch (opcode) {
+ case APM_CLIENT_EVENT_CMD_EOS_DONE:
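+ /* All queued data rendered: complete a pending drain or mark the stream stopped */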
+ spin_lock_irqsave(&prtd->lock, flags);
+ if (prtd->notify_on_drain) {
+ snd_compr_drain_notify(prtd->cstream);
+ prtd->notify_on_drain = false;
+ } else {
+ prtd->state = Q6APM_STREAM_STOPPED;
+ }
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+ case APM_CLIENT_EVENT_DATA_WRITE_DONE:
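+ /* A queued fragment was consumed; the token's upper bits carry the byte count */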
+ spin_lock_irqsave(&prtd->lock, flags);
+ bytes_written = token >> APM_WRITE_TOKEN_LEN_SHIFT;
+ prtd->copied_total += bytes_written;
+ snd_compr_fragment_elapsed(substream);
+
+ if (prtd->state != Q6APM_STREAM_RUNNING) {
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+ }
+
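+ /* Bytes received from userspace but not yet queued to the DSP */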
+ avail = prtd->bytes_received - prtd->bytes_sent;
+
+ if (avail > prtd->pcm_count) {
+ bytes_to_write = prtd->pcm_count;
+ } else {
+ if (substream->partial_drain || prtd->notify_on_drain)
+ is_last_buffer = true;
+ bytes_to_write = avail;
+ }
+
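+ /*
+ * Queue the next chunk. Tag the final buffer when partial-draining,
+ * and send EOS once the last pending data is queued during a drain.
+ */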
+ if (bytes_to_write) {
+ if (substream->partial_drain && is_last_buffer)
+ wflags |= APM_LAST_BUFFER_FLAG;
+
+ q6apm_write_async(prtd->graph,
+ bytes_to_write, 0, 0, wflags);
+
+ prtd->bytes_sent += bytes_to_write;
+
+ if (prtd->notify_on_drain && is_last_buffer)
+ audioreach_shared_memory_send_eos(prtd->graph);
+ }
+
+ spin_unlock_irqrestore(&prtd->lock, flags);
+ break;
+ default:
+ break;
+ }
+}
+
static int q6apm_dai_prepare(struct snd_soc_component *component,
struct snd_pcm_substream *substream)
{
@@ -387,6 +453,75 @@ static int q6apm_dai_pcm_new(struct snd_soc_component *component, struct snd_soc_pcm_runtime *rtd)
return snd_pcm_set_fixed_buffer_all(rtd->pcm, SNDRV_DMA_TYPE_DEV, component->dev, size);
}
+static int q6apm_dai_compr_open(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
+{
+ struct snd_soc_pcm_runtime *rtd = stream->private_data;
+ struct snd_soc_dai *cpu_dai = asoc_rtd_to_cpu(rtd, 0);
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6apm_dai_rtd *prtd;
+ struct q6apm_dai_data *pdata;
+ struct device *dev = component->dev;
+ int ret, size;
+ int graph_id;
+
+ graph_id = cpu_dai->driver->id;
+ pdata = snd_soc_component_get_drvdata(component);
+ if (!pdata)
+ return -EINVAL;
+
+ prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+ if (!prtd)
+ return -ENOMEM;
+
+ prtd->cstream = stream;
+ prtd->graph = q6apm_graph_open(dev, (q6apm_cb)event_handler_compr, prtd, graph_id);
+ if (IS_ERR(prtd->graph)) {
+ ret = PTR_ERR(prtd->graph);
+ kfree(prtd);
+ return ret;
+ }
+
+ runtime->private_data = prtd;
+ runtime->dma_bytes = BUFFER_BYTES_MAX;
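+ /* One DMA area sized for the largest fragment geometry we can accept */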
+ size = COMPR_PLAYBACK_MAX_FRAGMENT_SIZE * COMPR_PLAYBACK_MAX_NUM_FRAGMENTS;
+ ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, &prtd->dma_buffer);
+ if (ret) {
+ /* do not leak the opened graph and runtime data on failure */
+ q6apm_graph_close(prtd->graph);
+ kfree(prtd);
+ return ret;
+ }
+
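+ /* Fold the SMMU SID, when configured, into the upper bits of the buffer address */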
+ if (pdata->sid < 0)
+ prtd->phys = prtd->dma_buffer.addr;
+ else
+ prtd->phys = prtd->dma_buffer.addr | (pdata->sid << 32);
+
+ snd_compr_set_runtime_buffer(stream, &prtd->dma_buffer);
+ spin_lock_init(&prtd->lock);
+
+ q6apm_enable_compress_module(dev, prtd->graph, true);
+ return 0;
+}
+
+static int q6apm_dai_compr_free(struct snd_soc_component *component,
+ struct snd_compr_stream *stream)
+{
+ struct snd_compr_runtime *runtime = stream->runtime;
+ struct q6apm_dai_rtd *prtd = runtime->private_data;
+
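+ /* Stop the graph and release DSP mappings before freeing local state */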
+ q6apm_graph_stop(prtd->graph);
+ q6apm_unmap_memory_regions(prtd->graph, SNDRV_PCM_STREAM_PLAYBACK);
+ q6apm_graph_close(prtd->graph);
+ snd_dma_free_pages(&prtd->dma_buffer);
+ prtd->graph = NULL;
+ kfree(prtd);
+ runtime->private_data = NULL;
+
+ return 0;
+}
+
+static const struct snd_compress_ops q6apm_dai_compress_ops = {
+ .open = q6apm_dai_compr_open,
+ .free = q6apm_dai_compr_free,
+};
+
static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.name = DRV_NAME,
.open = q6apm_dai_open,
@@ -396,6 +531,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.hw_params = q6apm_dai_hw_params,
.pointer = q6apm_dai_pointer,
.trigger = q6apm_dai_trigger,
+ .compress_ops = &q6apm_dai_compress_ops,
};
static int q6apm_dai_probe(struct platform_device *pdev)
@@ -45,6 +45,7 @@
#define APM_WRITE_TOKEN_LEN_SHIFT 16
#define APM_MAX_SESSIONS 8
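+/* Flag for q6apm_write_async(): marks the final buffer of the current track */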
+#define APM_LAST_BUFFER_FLAG BIT(30)
struct q6apm {
struct device *dev;