From patchwork Tue Jun 21 06:12:24 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583820
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 01/11] spi: Move ctlr->cur_msg_prepared to struct spi_message
Date: Tue, 21 Jun 2022 08:12:24 +0200
Message-Id: <20220621061234.3626638-2-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

This makes it possible to transfer a message that is not at the current
tip of the async message queue. This is in preparation for the next
patches, which enable spi_sync messages to skip the queue altogether.
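As a simplified sketch of the idea (the field and callback names match the
kernel structs, but the types here are stripped down to just what this
patch touches):

    struct spi_message;

    struct spi_controller {
            struct spi_message *cur_msg;
            /* bool cur_msg_prepared;  -- removed: only described cur_msg */
            int (*unprepare_message)(struct spi_controller *ctlr,
                                     struct spi_message *msg);
    };

    struct spi_message {
            bool prepared;  /* moved here: travels with the message itself */
    };

    /* Finalization can now unprepare whichever message it is handed,
     * not only the message at the head of the controller queue. */
    static void finalize(struct spi_controller *ctlr, struct spi_message *msg)
    {
            if (msg->prepared && ctlr->unprepare_message)
                    ctlr->unprepare_message(ctlr, msg);
            msg->prepared = false;
    }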
Signed-off-by: David Jander
---
 drivers/spi/spi.c       | 7 ++++---
 include/linux/spi/spi.h | 7 ++++---
 2 files changed, 8 insertions(+), 6 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index c78d1ceeaa42..eb6360153fa1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1684,7 +1684,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			spi_finalize_current_message(ctlr);
 			goto out;
 		}
-		ctlr->cur_msg_prepared = true;
+		msg->prepared = true;
 	}
 
 	ret = spi_map_msg(ctlr, msg);
@@ -1926,7 +1926,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	 */
 	spi_res_release(ctlr, mesg);
 
-	if (ctlr->cur_msg_prepared && ctlr->unprepare_message) {
+	if (mesg->prepared && ctlr->unprepare_message) {
 		ret = ctlr->unprepare_message(ctlr, mesg);
 		if (ret) {
 			dev_err(&ctlr->dev, "failed to unprepare message: %d\n",
@@ -1934,9 +1934,10 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 		}
 	}
 
+	mesg->prepared = false;
+
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	ctlr->cur_msg = NULL;
-	ctlr->cur_msg_prepared = false;
 	ctlr->fallback = false;
 	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c96f526d9a20..1a75c26742f2 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -385,8 +385,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @queue: message queue
  * @idling: the device is entering idle state
  * @cur_msg: the currently in-flight message
- * @cur_msg_prepared: spi_prepare_message was called for the currently
- *	in-flight message
  * @cur_msg_mapped: message has been mapped for DMA
  * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
  *	selected
@@ -621,7 +619,6 @@ struct spi_controller {
 	bool			running;
 	bool			rt;
 	bool			auto_runtime_pm;
-	bool			cur_msg_prepared;
 	bool			cur_msg_mapped;
 	char			last_cs;
 	bool			last_cs_mode_high;
@@ -988,6 +985,7 @@ struct spi_transfer {
  * @queue: for use by whichever driver currently owns the message
  * @state: for use by whichever driver currently owns the message
  * @resources: for resource management when the spi message is processed
+ * @prepared: spi_prepare_message was called for this message
  *
  * A @spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1037,6 +1035,9 @@ struct spi_message {
 
 	/* list of spi_res resources when the spi message is processed */
 	struct list_head	resources;
+
+	/* spi_prepare_message was called for this message */
+	bool			prepared;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)

From patchwork Tue Jun 21 06:12:25 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583818
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 02/11] spi: Don't use the message queue if possible in spi_sync
Date: Tue, 21 Jun 2022 08:12:25 +0200
Message-Id: <20220621061234.3626638-3-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

The interaction with the controller message queue and its corresponding
auxiliary flags and variables requires the use of the queue_lock, which
is costly. Since spi_sync will transfer the complete message anyway, and
not return until it is finished, there is no need to put the message
into the queue if the queue is empty. This can save a lot of overhead.

As an example of how significant this is: when using the MCP2518FD SPI
CAN controller on an i.MX8MM SoC, the time during which the interrupt
line stays active (across 3 relatively short spi_sync messages) is
reduced from 98us to 72us by this patch.
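The core of the optimization, paraphrased (a simplified sketch of the
patch below; error handling and statistics are omitted, and the
__spi_sync_queued() helper is hypothetical shorthand for the existing
queued path):

    static int __spi_sync(struct spi_device *spi, struct spi_message *msg)
    {
            struct spi_controller *ctlr = spi->controller;

            if (READ_ONCE(ctlr->queue_empty)) {
                    /* Fast path: nothing is queued, so transfer the message
                     * directly in the calling context. No queue_lock, no
                     * kworker round trip. */
                    msg->sync = true;
                    __spi_transfer_message_noqueue(ctlr, msg);
                    return msg->status;
            }

            /* Slow path: queued messages may have come from this same
             * context, so preserve ordering by appending to the async
             * queue and waiting for completion. */
            return __spi_sync_queued(spi, msg);
    }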
Signed-off-by: David Jander
---
 drivers/spi/spi.c       | 246 ++++++++++++++++++++++++----------------
 include/linux/spi/spi.h |  11 +-
 2 files changed, 159 insertions(+), 98 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index eb6360153fa1..2d057d03c4f7 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1549,6 +1549,80 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
 	}
 }
 
+static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+				       struct spi_message *msg, bool was_busy)
+{
+	struct spi_transfer *xfer;
+	int ret;
+
+	if (!was_busy && ctlr->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(ctlr->dev.parent);
+		if (ret < 0) {
+			pm_runtime_put_noidle(ctlr->dev.parent);
+			dev_err(&ctlr->dev, "Failed to power device: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (!was_busy)
+		trace_spi_controller_busy(ctlr);
+
+	if (!was_busy && ctlr->prepare_transfer_hardware) {
+		ret = ctlr->prepare_transfer_hardware(ctlr);
+		if (ret) {
+			dev_err(&ctlr->dev,
+				"failed to prepare transfer hardware: %d\n",
+				ret);
+
+			if (ctlr->auto_runtime_pm)
+				pm_runtime_put(ctlr->dev.parent);
+
+			msg->status = ret;
+			spi_finalize_current_message(ctlr);
+
+			return ret;
+		}
+	}
+
+	trace_spi_message_start(msg);
+
+	if (ctlr->prepare_message) {
+		ret = ctlr->prepare_message(ctlr, msg);
+		if (ret) {
+			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+				ret);
+			msg->status = ret;
+			spi_finalize_current_message(ctlr);
+			return ret;
+		}
+		msg->prepared = true;
+	}
+
+	ret = spi_map_msg(ctlr, msg);
+	if (ret) {
+		msg->status = ret;
+		spi_finalize_current_message(ctlr);
+		return ret;
+	}
+
+	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			xfer->ptp_sts_word_pre = 0;
+			ptp_read_system_prets(xfer->ptp_sts);
+		}
+	}
+
+	ret = ctlr->transfer_one_message(ctlr, msg);
+	if (ret) {
+		dev_err(&ctlr->dev,
+			"failed to transfer one message from queue\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1564,7 +1638,6 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
  */
 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
-	struct spi_transfer *xfer;
 	struct spi_message *msg;
 	bool was_busy = false;
 	unsigned long flags;
@@ -1599,6 +1672,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		    !ctlr->unprepare_transfer_hardware) {
 			spi_idle_runtime_pm(ctlr);
 			ctlr->busy = false;
+			ctlr->queue_empty = true;
 			trace_spi_controller_idle(ctlr);
 		} else {
 			kthread_queue_work(ctlr->kworker,
@@ -1625,6 +1699,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 		spin_lock_irqsave(&ctlr->queue_lock, flags);
 		ctlr->idling = false;
+		ctlr->queue_empty = true;
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1641,75 +1716,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	mutex_lock(&ctlr->io_mutex);
-
-	if (!was_busy && ctlr->auto_runtime_pm) {
-		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
-		if (ret < 0) {
-			dev_err(&ctlr->dev, "Failed to power device: %d\n",
-				ret);
-			mutex_unlock(&ctlr->io_mutex);
-			return;
-		}
-	}
-
-	if (!was_busy)
-		trace_spi_controller_busy(ctlr);
-
-	if (!was_busy && ctlr->prepare_transfer_hardware) {
-		ret = ctlr->prepare_transfer_hardware(ctlr);
-		if (ret) {
-			dev_err(&ctlr->dev,
-				"failed to prepare transfer hardware: %d\n",
-				ret);
-
-			if (ctlr->auto_runtime_pm)
-				pm_runtime_put(ctlr->dev.parent);
-
-			msg->status = ret;
-			spi_finalize_current_message(ctlr);
-
-			mutex_unlock(&ctlr->io_mutex);
-			return;
-		}
-	}
-
-	trace_spi_message_start(msg);
-
-	if (ctlr->prepare_message) {
-		ret = ctlr->prepare_message(ctlr, msg);
-		if (ret) {
-			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
-				ret);
-			msg->status = ret;
-			spi_finalize_current_message(ctlr);
-			goto out;
-		}
-		msg->prepared = true;
-	}
-
-	ret = spi_map_msg(ctlr, msg);
-	if (ret) {
-		msg->status = ret;
-		spi_finalize_current_message(ctlr);
-		goto out;
-	}
-
-	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
-		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			xfer->ptp_sts_word_pre = 0;
-			ptp_read_system_prets(xfer->ptp_sts);
-		}
-	}
-
-	ret = ctlr->transfer_one_message(ctlr, msg);
-	if (ret) {
-		dev_err(&ctlr->dev,
-			"failed to transfer one message from queue: %d\n",
-			ret);
-		goto out;
-	}
-
-out:
+	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
@@ -1839,6 +1846,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 {
 	ctlr->running = false;
 	ctlr->busy = false;
+	ctlr->queue_empty = true;
 
 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
 	if (IS_ERR(ctlr->kworker)) {
@@ -1936,11 +1944,20 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-	ctlr->cur_msg = NULL;
-	ctlr->fallback = false;
-	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	if (!mesg->sync) {
+		/*
+		 * This message was sent via the async message queue. Handle
+		 * the queue and kick the worker thread to do the
+		 * idling/shutdown or send the next message if needed.
+		 */
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		WARN(ctlr->cur_msg != mesg,
+		     "Finalizing queued message that is not the current head of queue!");
+		ctlr->cur_msg = NULL;
+		ctlr->fallback = false;
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	}
 
 	trace_spi_message_done(mesg);
 
@@ -2043,6 +2060,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &ctlr->queue);
+	ctlr->queue_empty = false;
 	if (!ctlr->busy && need_pump)
 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
@@ -3938,6 +3956,39 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 
 }
 
+static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
+{
+	bool was_busy;
+	int ret;
+
+	mutex_lock(&ctlr->io_mutex);
+
+	/* If another context is idling the device then wait */
+	while (ctlr->idling)
+		usleep_range(10000, 11000);
+
+	was_busy = READ_ONCE(ctlr->busy);
+
+	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+	if (ret)
+		goto out;
+
+	if (!was_busy) {
+		kfree(ctlr->dummy_rx);
+		ctlr->dummy_rx = NULL;
+		kfree(ctlr->dummy_tx);
+		ctlr->dummy_tx = NULL;
+		if (ctlr->unprepare_transfer_hardware &&
+		    ctlr->unprepare_transfer_hardware(ctlr))
+			dev_err(&ctlr->dev,
+				"failed to unprepare transfer hardware\n");
+		spi_idle_runtime_pm(ctlr);
+	}
+
+out:
+	mutex_unlock(&ctlr->io_mutex);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -3956,51 +4007,52 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_controller *ctlr = spi->controller;
-	unsigned long flags;
 
 	status = __spi_validate(spi, message);
 	if (status != 0)
 		return status;
 
-	message->complete = spi_complete;
-	message->context = &done;
 	message->spi = spi;
 
 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
 
 	/*
-	 * If we're not using the legacy transfer method then we will
-	 * try to transfer in the calling context so special case.
-	 * This code would be less tricky if we could remove the
-	 * support for driver implemented message queues.
+	 * Checking queue_empty here only guarantees async/sync message
+	 * ordering when coming from the same context. It does not need to
+	 * guard against reentrancy from a different context. The io_mutex
+	 * will catch those cases.
 	 */
-	if (ctlr->transfer == spi_queued_transfer) {
-		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+	if (READ_ONCE(ctlr->queue_empty)) {
+		message->sync = true;
+		message->actual_length = 0;
+		message->status = -EINPROGRESS;
 
 		trace_spi_message_submit(message);
 
-		status = __spi_queued_transfer(spi, message, false);
+		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
+		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
 
-		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
-	} else {
-		status = spi_async_locked(spi, message);
+		__spi_transfer_message_noqueue(ctlr, message);
+
+		return message->status;
 	}
 
+	/*
+	 * There are messages in the async queue that could have originated
+	 * from the same context, so we need to preserve ordering.
+	 * Therefore we send the message to the async queue and wait until they
+	 * are completed.
+	 */
+	message->complete = spi_complete;
+	message->context = &done;
+	status = spi_async_locked(spi, message);
 	if (status == 0) {
-		/* Push out the messages in the calling context if we can */
-		if (ctlr->transfer == spi_queued_transfer) {
-			SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
-						       spi_sync_immediate);
-			SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics,
-						       spi_sync_immediate);
-			__spi_pump_messages(ctlr, false);
-		}
-
 		wait_for_completion(&done);
 		status = message->status;
 	}
 	message->context = NULL;
+
 	return status;
 }
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 1a75c26742f2..74261a83b5fa 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -461,6 +461,8 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @irq_flags: Interrupt enable state during PTP system timestamping
  * @fallback: fallback to pio if dma transfer return failure with
  *	SPI_TRANS_FAIL_NO_START.
+ * @queue_empty: signal green light for opportunistically skipping the queue
+ *	for spi_sync transfers.
  *
  * Each SPI controller can communicate with one or more @spi_device
  * children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -677,6 +679,9 @@ struct spi_controller {
 
 	/* Interrupt enable state during PTP system timestamping */
 	unsigned long		irq_flags;
+
+	/* Flag for enabling opportunistic skipping of the queue in spi_sync */
+	bool			queue_empty;
 };
 
 static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -986,6 +991,7 @@ struct spi_transfer {
  * @state: for use by whichever driver currently owns the message
  * @resources: for resource management when the spi message is processed
  * @prepared: spi_prepare_message was called for this message
+ * @sync: this message took the direct sync path skipping the async queue
  *
 * A @spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1037,7 +1043,10 @@ struct spi_message {
 	struct list_head	resources;
 
 	/* spi_prepare_message was called for this message */
-	bool			prepared;
+	bool			prepared;
+
+	/* this message is skipping the async queue */
+	bool			sync;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)

From patchwork Tue Jun 21 06:12:26 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583822
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 03/11] spi: Lock controller idling transition inside the io_mutex
Date: Tue, 21 Jun 2022 08:12:26 +0200
Message-Id: <20220621061234.3626638-4-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

This way, the spi_sync path does not need to deal with the idling
transition.
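The resulting lock nesting, sketched (a simplified locking skeleton of
__spi_pump_messages() under this patch, not the full function):

    static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
    {
            unsigned long flags;

            mutex_lock(&ctlr->io_mutex);    /* outer lock, held for the
                                               whole pump cycle */

            spin_lock_irqsave(&ctlr->queue_lock, flags);
            /* Queue inspection and the busy/idling transitions now all
             * happen with the io_mutex held, so a concurrent spi_sync
             * caller simply blocks on the mutex instead of having to
             * special-case an in-progress idle transition. */
            spin_unlock_irqrestore(&ctlr->queue_lock, flags);

            /* ... transfer the head-of-queue message, still under io_mutex ... */

            mutex_unlock(&ctlr->io_mutex);
    }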
Signed-off-by: David Jander
---
 drivers/spi/spi.c | 18 ++++++++++++------
 1 file changed, 12 insertions(+), 6 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2d057d03c4f7..cfff2ff96fa0 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1643,27 +1643,30 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	unsigned long flags;
 	int ret;
 
+	/* Take the IO mutex */
+	mutex_lock(&ctlr->io_mutex);
+
 	/* Lock queue */
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/* Make sure we are not already running a message */
 	if (ctlr->cur_msg) {
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* Check if the queue is idle */
 	if (list_empty(&ctlr->queue) || !ctlr->running) {
 		if (!ctlr->busy) {
 			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-			return;
+			goto out_unlock;
 		}
 
 		/* Defer any non-atomic teardown to the thread */
@@ -1679,7 +1682,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 				   &ctlr->pump_messages);
 		}
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	ctlr->busy = false;
@@ -1701,7 +1704,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		ctlr->idling = false;
 		ctlr->queue_empty = true;
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* Extract head of queue */
@@ -1715,13 +1718,16 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	ctlr->busy = true;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
-	mutex_lock(&ctlr->io_mutex);
 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
 	if (!ret)
 		cond_resched();
+	return;
+
+out_unlock:
+	mutex_unlock(&ctlr->io_mutex);
 }
 
 /**
From patchwork Tue Jun 21 06:12:30 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583821
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 07/11] spi: Remove the now unused ctlr->idling flag
Date: Tue, 21 Jun 2022 08:12:30 +0200
Message-Id: <20220621061234.3626638-8-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

The ctlr->idling flag is never checked now, so we don't need to set it
either.

Signed-off-by: David Jander
---
 drivers/spi/spi.c       | 2 --
 include/linux/spi/spi.h | 2 --
 2 files changed, 4 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 71b767a9ad77..52736e339645 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1674,7 +1674,6 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	}
 
 	ctlr->busy = false;
-	ctlr->idling = true;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	kfree(ctlr->dummy_rx);
@@ -1689,7 +1688,6 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	trace_spi_controller_idle(ctlr);
 
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
-	ctlr->idling = false;
 	ctlr->queue_empty = true;
 	goto out_unlock;
 }
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 74261a83b5fa..c58f46be762f 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -383,7 +383,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @pump_messages: work struct for scheduling work to the message pump
  * @queue_lock: spinlock to synchronise access to message queue
  * @queue: message queue
- * @idling: the device is entering idle state
  * @cur_msg: the currently in-flight message
  * @cur_msg_mapped: message has been mapped for DMA
  * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
@@ -616,7 +615,6 @@ struct spi_controller {
 	spinlock_t		queue_lock;
 	struct list_head	queue;
 	struct spi_message	*cur_msg;
-	bool			idling;
 	bool			busy;
 	bool			running;
 	bool			rt;
From patchwork Tue Jun 21 06:12:33 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583817
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 10/11] spi: Ensure the io_mutex is held until spi_finalize_current_message()
Date: Tue, 21 Jun 2022 08:12:33 +0200
Message-Id: <20220621061234.3626638-11-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

This patch introduces a completion that is completed in
spi_finalize_current_message() and waited for in
__spi_pump_transfer_message(). This way, all manipulation of
ctlr->cur_msg is done with the io_mutex held and strictly ordered:
__spi_pump_transfer_message() will not return until
spi_finalize_current_message() is done using ctlr->cur_msg, and the
calling context only touches ctlr->cur_msg after it returns.
Due to this, we can safely drop the spin-locks around ctlr->cur_msg.
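The ordering this establishes, in skeleton form (a simplified sketch; the
real functions in the diff below carry more state):

    /* pump side, called with ctlr->io_mutex held */
    static int __spi_pump_transfer_message(struct spi_controller *ctlr,
                                           struct spi_message *msg, bool was_busy)
    {
            int ret;

            reinit_completion(&ctlr->cur_msg_completion);
            ret = ctlr->transfer_one_message(ctlr, msg);
            if (!ret)
                    wait_for_completion(&ctlr->cur_msg_completion);
            /* From here on, spi_finalize_current_message() is done with
             * ctlr->cur_msg, so the caller may clear it without taking
             * the queue_lock. */
            return ret;
    }

    /* driver side, possibly running in IRQ or completion context */
    void spi_finalize_current_message(struct spi_controller *ctlr)
    {
            /* ... unmap and unprepare the message ... */
            complete(&ctlr->cur_msg_completion);
    }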
Signed-off-by: David Jander
---
 drivers/spi/spi.c       | 32 ++++++++++++++------------------
 include/linux/spi/spi.h |  6 ++----
 2 files changed, 16 insertions(+), 22 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3df84f43918c..db08cb868652 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1613,11 +1613,14 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
 		}
 	}
 
+	reinit_completion(&ctlr->cur_msg_completion);
 	ret = ctlr->transfer_one_message(ctlr, msg);
 	if (ret) {
 		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		return ret;
+	} else {
+		wait_for_completion(&ctlr->cur_msg_completion);
 	}
 
 	return 0;
@@ -1704,6 +1707,12 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+
+	if (!ret)
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
@@ -1897,12 +1906,9 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 {
 	struct spi_transfer *xfer;
 	struct spi_message *mesg;
-	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	mesg = ctlr->cur_msg;
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
 		list_for_each_entry(xfer, &mesg->transfers, transfer_list) {
@@ -1936,20 +1942,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	if (!mesg->sync) {
-		/*
-		 * This message was sent via the async message queue. Handle
-		 * the queue and kick the worker thread to do the
-		 * idling/shutdown or send the next message if needed.
-		 */
-		spin_lock_irqsave(&ctlr->queue_lock, flags);
-		WARN(ctlr->cur_msg != mesg,
-		     "Finalizing queued message that is not the current head of queue!");
-		ctlr->cur_msg = NULL;
-		ctlr->fallback = false;
-		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
-		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-	}
+	complete(&ctlr->cur_msg_completion);
 
 	trace_spi_message_done(mesg);
 
@@ -3036,6 +3029,7 @@ int spi_register_controller(struct spi_controller *ctlr)
 	}
 	ctlr->bus_lock_flag = 0;
 	init_completion(&ctlr->xfer_completion);
+	init_completion(&ctlr->cur_msg_completion);
 	if (!ctlr->max_dma_len)
 		ctlr->max_dma_len = INT_MAX;
 
@@ -3962,6 +3956,9 @@ static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct s
 	if (ret)
 		goto out;
 
+	ctlr->cur_msg = NULL;
+	ctlr->fallback = false;
+
 	if (!was_busy) {
 		kfree(ctlr->dummy_rx);
 		ctlr->dummy_rx = NULL;
@@ -4013,7 +4010,6 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	 * will catch those cases.
 	 */
 	if (READ_ONCE(ctlr->queue_empty)) {
-		message->sync = true;
 		message->actual_length = 0;
 		message->status = -EINPROGRESS;
 
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c58f46be762f..c56e0d240a58 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -384,6 +384,7 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @queue_lock: spinlock to synchronise access to message queue
  * @queue: message queue
  * @cur_msg: the currently in-flight message
+ * @cur_msg_completion: a completion for the current in-flight message
  * @cur_msg_mapped: message has been mapped for DMA
  * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
  *	selected
@@ -615,6 +616,7 @@ struct spi_controller {
 	spinlock_t		queue_lock;
 	struct list_head	queue;
 	struct spi_message	*cur_msg;
+	struct completion	cur_msg_completion;
 	bool			busy;
 	bool			running;
 	bool			rt;
@@ -989,7 +991,6 @@ struct spi_transfer {
  * @state: for use by whichever driver currently owns the message
  * @resources: for resource management when the spi message is processed
  * @prepared: spi_prepare_message was called for this message
- * @sync: this message took the direct sync path skipping the async queue
  *
 * A @spi_message is used to execute an atomic sequence of data transfers,
 * each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1042,9 +1043,6 @@ struct spi_message {
 
 	/* spi_prepare_message was called for this message */
 	bool			prepared;
-
-	/* this message is skipping the async queue */
-	bool			sync;
 };
 
 static inline void spi_message_init_no_memset(struct spi_message *m)

From patchwork Tue Jun 21 06:12:34 2022
X-Patchwork-Submitter: David Jander
X-Patchwork-Id: 583819
From: David Jander
To: Mark Brown
Cc: linux-spi@vger.kernel.org, Marc Kleine-Budde, Andrew Lunn, David Jander
Subject: [PATCH v3 11/11] spi: opportunistically skip ctlr->cur_msg_completion
Date: Tue, 21 Jun 2022 08:12:34 +0200
Message-Id: <20220621061234.3626638-12-david@protonic.nl>
X-Mailer: git-send-email 2.32.0
In-Reply-To: <20220621061234.3626638-1-david@protonic.nl>
References: <20220621061234.3626638-1-david@protonic.nl>
X-Mailing-List: linux-spi@vger.kernel.org

There are only a few drivers that do not call
spi_finalize_current_message() in the context of transfer_one_message(),
and even for those cases the completion ctlr->cur_msg_completion is not
always needed. The calls to complete() and wait_for_completion() each
take a spin-lock, which is costly. This patch makes it possible to avoid
those calls in the vast majority of cases, by introducing two flags
that, with the help of ordering via barriers, can safely avoid the use
of the completion. In case of a race with the context calling
spi_finalize_current_message(), the scheme errs on the safe side and
takes the completion.

The impact of this patch is worth the effort: on an i.MX8MM SoC, the
time the SPI bus is idle between two consecutive calls to spi_sync() is
reduced from 19.6us to 16.8us, roughly 15%.
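The flag protocol, reduced to its bare shape (a sketch of the ordering
argument only; the full kernel code in the diff below carries the error
handling around it):

    /* transfer path (holds io_mutex) */
    WRITE_ONCE(ctlr->cur_msg_incomplete, true);
    WRITE_ONCE(ctlr->cur_msg_need_completion, false);
    reinit_completion(&ctlr->cur_msg_completion);
    smp_wmb();                         /* publish flags before starting I/O */

    ctlr->transfer_one_message(ctlr, msg);

    WRITE_ONCE(ctlr->cur_msg_need_completion, true);
    smp_mb();                          /* pairs with the finalize side */
    if (READ_ONCE(ctlr->cur_msg_incomplete))
            wait_for_completion(&ctlr->cur_msg_completion);

    /* finalize path (any context) */
    WRITE_ONCE(ctlr->cur_msg_incomplete, false);
    smp_mb();                          /* pairs with the transfer side */
    if (READ_ONCE(ctlr->cur_msg_need_completion))
            complete(&ctlr->cur_msg_completion);

This is a Dekker-style store/load handshake: whatever the interleaving,
at least one side observes the other's flag write. Either the transfer
side sees cur_msg_incomplete already cleared and skips the wait, or the
finalize side sees cur_msg_need_completion set and issues the complete();
in the racy middle ground both happen, which is safe.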
Signed-off-by: David Jander
---
 drivers/spi/spi.c       | 27 +++++++++++++++++++++++++--
 include/linux/spi/spi.h |  8 ++++++++
 2 files changed, 33 insertions(+), 2 deletions(-)

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index db08cb868652..ef37f043fd17 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1613,14 +1613,34 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
 		}
 	}
 
+	/*
+	 * A driver's implementation of transfer_one_message() must arrange
+	 * for spi_finalize_current_message() to get called. Most drivers
+	 * will do this in the calling context, but some don't. For those
+	 * cases, a completion is used to guarantee that this function does
+	 * not return until spi_finalize_current_message() is done accessing
+	 * ctlr->cur_msg.
+	 * The following two flags make it possible to opportunistically skip
+	 * the use of the completion, since taking it involves expensive spin
+	 * locks. In case of a race with the context that calls
+	 * spi_finalize_current_message(), the completion will always be used,
+	 * due to strict ordering of these flags using barriers.
+	 */
+	WRITE_ONCE(ctlr->cur_msg_incomplete, true);
+	WRITE_ONCE(ctlr->cur_msg_need_completion, false);
 	reinit_completion(&ctlr->cur_msg_completion);
+	smp_wmb(); /* make these available to spi_finalize_current_message */
+
 	ret = ctlr->transfer_one_message(ctlr, msg);
 	if (ret) {
 		dev_err(&ctlr->dev,
 			"failed to transfer one message from queue\n");
 		return ret;
 	} else {
-		wait_for_completion(&ctlr->cur_msg_completion);
+		WRITE_ONCE(ctlr->cur_msg_need_completion, true);
+		smp_mb(); /* see spi_finalize_current_message()... */
+		if (READ_ONCE(ctlr->cur_msg_incomplete))
+			wait_for_completion(&ctlr->cur_msg_completion);
 	}
 
 	return 0;
@@ -1942,7 +1962,10 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	complete(&ctlr->cur_msg_completion);
+	WRITE_ONCE(ctlr->cur_msg_incomplete, false);
+	smp_mb(); /* See __spi_pump_transfer_message()... */
+	if (READ_ONCE(ctlr->cur_msg_need_completion))
+		complete(&ctlr->cur_msg_completion);
 
 	trace_spi_message_done(mesg);
 
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index c56e0d240a58..eb0d316e3c36 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -385,6 +385,12 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch
  * @queue: message queue
  * @cur_msg: the currently in-flight message
  * @cur_msg_completion: a completion for the current in-flight message
+ * @cur_msg_incomplete: Flag used internally to opportunistically skip
+ *	the @cur_msg_completion. This flag is used to check if the driver has
+ *	already called spi_finalize_current_message().
+ * @cur_msg_need_completion: Flag used internally to opportunistically skip
+ *	the @cur_msg_completion. This flag is used to signal the context that
+ *	is running spi_finalize_current_message() that it needs to complete()
 * @cur_msg_mapped: message has been mapped for DMA
 * @last_cs: the last chip_select that is recorded by set_cs, -1 on non chip
 *	selected
@@ -617,6 +623,8 @@ struct spi_controller {
 	struct list_head	queue;
 	struct spi_message	*cur_msg;
 	struct completion	cur_msg_completion;
+	bool			cur_msg_incomplete;
+	bool			cur_msg_need_completion;
 	bool			busy;
 	bool			running;
 	bool			rt;
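For context, the call pattern this whole series speeds up is the plain
synchronous transfer a client driver issues. A generic illustration (a
hypothetical helper, not code from this series; a short MCP2518FD-style
register read is just the motivating example):

    /* hypothetical client-driver helper: one short sync register read */
    static int example_read_reg(struct spi_device *spi, u8 reg, u8 *val)
    {
            u8 tx[2] = { reg, 0 };
            u8 rx[2];
            struct spi_transfer xfer = {
                    .tx_buf = tx,
                    .rx_buf = rx,
                    .len    = sizeof(tx),
            };
            struct spi_message msg;
            int ret;

            spi_message_init_with_transfers(&msg, &xfer, 1);

            /* With this series applied, if the controller queue is empty
             * this runs entirely in the calling context: no queue_lock,
             * no kworker wakeup, and usually no completion. */
            ret = spi_sync(spi, &msg);
            if (ret)
                    return ret;

            *val = rx[1];
            return 0;
    }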