Message ID | 1430310338.27241.45.camel@linaro.org |
---|---|
State | New |
On Wed, 2015-04-29 at 13:25 +0100, Jon Medhurst (Tixy) wrote:
> diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c
> index c74575b..5818d9b 100644
> --- a/drivers/mailbox/scpi_protocol.c
> +++ b/drivers/mailbox/scpi_protocol.c
> @@ -286,14 +286,23 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
>  	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
>  	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
> 
> -	mem->command = cpu_to_le32(t->cmd);
>  	if (t->tx_buf)
>  		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
>  	if (t->rx_buf) {
> +		int token;
>  		spin_lock_irqsave(&ch->rx_lock, flags);
> +		/*
> +		 * Presumably we can do this token setting outside
> +		 * spinlock and still be safe from concurrency?
> +		 */

To answer my own question: yes, the four lines below can be moved up above the spin_lock_irqsave(). We had better already be safe from concurrency at this point, because we are also writing to the channel's shared memory area without holding any lock.

> +		do
> +			token = (++ch->token) & CMD_TOKEN_ID_MASK;
> +		while (!token);
> +		t->cmd |= token << CMD_TOKEN_ID_SHIFT;
>  		list_add_tail(&t->node, &ch->rx_pending);
>  		spin_unlock_irqrestore(&ch->rx_lock, flags);
>  	}
> +	mem->command = cpu_to_le32(t->cmd);
>  }
> 
>  static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
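Concretely, the rearrangement would look something like the sketch below. The declarations above the quoted hunk (flags, t) are filled in from context rather than taken from the patch, so treat them as assumed; the point is that the token is generated before taking rx_lock, which then only protects the rx_pending list shared with the receive path.

```c
static void scpi_tx_prepare(struct mbox_client *c, void *msg)
{
	unsigned long flags;
	struct scpi_xfer *t = msg;
	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;

	if (t->tx_buf)
		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
	if (t->rx_buf) {
		int token;

		/*
		 * Generate a non-zero token without holding rx_lock:
		 * this function must already be serialised per channel,
		 * since the writes to the channel's shared memory area
		 * are done without any lock.
		 */
		do
			token = (++ch->token) & CMD_TOKEN_ID_MASK;
		while (!token);
		t->cmd |= token << CMD_TOKEN_ID_SHIFT;

		/* rx_pending is shared with the RX path, so keep the lock. */
		spin_lock_irqsave(&ch->rx_lock, flags);
		list_add_tail(&t->node, &ch->rx_pending);
		spin_unlock_irqrestore(&ch->rx_lock, flags);
	}
	/* Write the command word last, with the token now included. */
	mem->command = cpu_to_le32(t->cmd);
}
```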
diff --git a/drivers/mailbox/scpi_protocol.c b/drivers/mailbox/scpi_protocol.c
index c74575b..5818d9b 100644
--- a/drivers/mailbox/scpi_protocol.c
+++ b/drivers/mailbox/scpi_protocol.c
@@ -286,14 +286,23 @@ static void scpi_tx_prepare(struct mbox_client *c, void *msg)
 	struct scpi_chan *ch = container_of(c, struct scpi_chan, cl);
 	struct scpi_shared_mem *mem = (struct scpi_shared_mem *)ch->tx_payload;
 
-	mem->command = cpu_to_le32(t->cmd);
 	if (t->tx_buf)
 		memcpy_toio(mem->payload, t->tx_buf, t->tx_len);
 	if (t->rx_buf) {
+		int token;
 		spin_lock_irqsave(&ch->rx_lock, flags);
+		/*
+		 * Presumably we can do this token setting outside
+		 * spinlock and still be safe from concurrency?
+		 */
+		do
+			token = (++ch->token) & CMD_TOKEN_ID_MASK;
+		while (!token);
+		t->cmd |= token << CMD_TOKEN_ID_SHIFT;
 		list_add_tail(&t->node, &ch->rx_pending);
 		spin_unlock_irqrestore(&ch->rx_lock, flags);
 	}
+	mem->command = cpu_to_le32(t->cmd);
 }
 
 static struct scpi_xfer *get_scpi_xfer(struct scpi_chan *ch)
@@ -322,7 +331,7 @@ static int
 scpi_send_message(u8 cmd, void *tx_buf, unsigned int len, void *rx_buf)
 {
 	int ret;
-	u8 token, chan;
+	u8 chan;
 	struct scpi_xfer *msg;
 	struct scpi_chan *scpi_chan;
 
@@ -333,10 +342,8 @@ scpi_send_message(u8 cmd, void *tx_buf, unsigned int len, void *rx_buf)
 	if (!msg)
 		return -ENOMEM;
 
-	token = atomic_inc_return(&scpi_chan->token) & CMD_TOKEN_ID_MASK;
-
 	msg->slot = BIT(SCPI_SLOT);
-	msg->cmd = PACK_SCPI_CMD(cmd, token, len);
+	msg->cmd = PACK_SCPI_CMD(cmd, 0, len);
 	msg->tx_buf = tx_buf;
 	msg->tx_len = len;
 	msg->rx_buf = rx_buf;