@@ -63,6 +63,8 @@ static void qce_aead_done(void *data)
sg_free_table(&rctx->dst_tbl);
}
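+ /* Drop the BAM pipe lock taken in qce_aead_async_req_handle() */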
+ qce_bam_unlock(qce);
+
error = qce_check_status(qce, &status);
if (error < 0 && (error != -EBADMSG))
dev_err(qce->dev, "aead operation error (%x)\n", status);
@@ -433,6 +435,8 @@ qce_aead_async_req_handle(struct crypto_async_request *async_req)
else
rctx->assoclen = req->assoclen;
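+ /*
+ * Lock the BAM pipe for this request; the lock is dropped in
+ * qce_aead_done() once the engine has finished.
+ */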
+ qce_bam_lock(qce);
+
diff_dst = (req->src != req->dst) ? true : false;
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
@@ -565,6 +565,36 @@ int qce_start(struct crypto_async_request *async_req, u32 type)
#define STATUS_ERRORS \
(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
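+
+/*
+ * The CE BAM may be shared with other execution environments. Submitting
+ * a command descriptor with the LOCK flag set tells the BAM to serve only
+ * this pipe until a matching UNLOCK descriptor is seen, so one request's
+ * command descriptors are not interleaved with another environment's
+ * traffic.
+ */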
+void qce_bam_lock(struct qce_device *qce)
+{
+ int ret;
+
+ qce_clear_bam_transaction(qce);
+
+ /* This is just a dummy write to acquire the lock on the BAM pipe. */
+ qce_write(qce, REG_AUTH_SEG_CFG, 0);
+
+ ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_LOCK);
+ if (ret)
+ dev_err(qce->dev, "Failed to lock the BAM pipe: %d\n", ret);
+}
+
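+/* Counterpart of qce_bam_lock(): releases the BAM pipe to other users. */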
+void qce_bam_unlock(struct qce_device *qce)
+{
+ int ret;
+
+ qce_clear_bam_transaction(qce);
+
+ /* This is just a dummy write to release the lock on the BAM pipe. */
+ qce_write(qce, REG_AUTH_SEG_CFG, 0);
+
+ ret = qce_submit_cmd_desc(qce, QCE_DMA_DESC_FLAG_UNLOCK);
+ if (ret)
+ dev_err(qce->dev, "Failed to unlock the BAM pipe: %d\n", ret);
+}
+
int qce_check_status(struct qce_device *qce, u32 *status)
{
int ret = 0;
@@ -65,4 +65,7 @@ struct qce_algo_ops {
int (*async_req_handle)(struct crypto_async_request *async_req);
};
+void qce_bam_lock(struct qce_device *qce);
+void qce_bam_unlock(struct qce_device *qce);
+
#endif /* _CORE_H_ */
@@ -80,6 +80,10 @@ int qce_submit_cmd_desc(struct qce_device *qce, unsigned long flags)
int ret = 0;
desc_flags = DMA_PREP_CMD;
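+ /*
+ * Map the QCE lock/unlock flags onto the dmaengine DMA_PREP_LOCK/
+ * DMA_PREP_UNLOCK flags so the BAM driver marks this command
+ * descriptor accordingly.
+ */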
+ if (flags & QCE_DMA_DESC_FLAG_LOCK)
+ desc_flags |= DMA_PREP_LOCK;
+ else if (flags & QCE_DMA_DESC_FLAG_UNLOCK)
+ desc_flags |= DMA_PREP_UNLOCK;
/*
* The HPG recommends always using the consumer pipe for command
@@ -21,6 +21,8 @@ struct qce_device;
#define QCE_BAM_CMD_ELEMENT_SIZE 64
#define QCE_DMA_DESC_FLAG_BAM_NWD (0x0004)
#define QCE_MAX_REG_READ 8
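+/* Passed to qce_submit_cmd_desc() to lock or unlock the BAM pipe */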
+#define QCE_DMA_DESC_FLAG_LOCK (0x0002)
+#define QCE_DMA_DESC_FLAG_UNLOCK (0x0001)
struct qce_result_dump {
@@ -60,6 +60,8 @@ static void qce_ahash_done(void *data)
rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
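+ /* Drop the BAM pipe lock taken in qce_ahash_async_req_handle() */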
+ qce_bam_unlock(qce);
+
error = qce_check_status(qce, &status);
if (error < 0)
dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
@@ -90,6 +92,8 @@ static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
rctx->authklen = AES_KEYSIZE_128;
}
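+ /* Lock the BAM pipe for this request; dropped in qce_ahash_done() */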
+ qce_bam_lock(qce);
+
rctx->src_nents = sg_nents_for_len(req->src, req->nbytes);
if (rctx->src_nents < 0) {
dev_err(qce->dev, "Invalid numbers of src SG.\n");
@@ -52,6 +52,8 @@ static void qce_skcipher_done(void *data)
sg_free_table(&rctx->dst_tbl);
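+ /* Drop the BAM pipe lock taken in qce_skcipher_async_req_handle() */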
+ qce_bam_unlock(qce);
+
error = qce_check_status(qce, &status);
if (error < 0)
dev_dbg(qce->dev, "skcipher operation error (%x)\n", status);
@@ -82,6 +84,8 @@ qce_skcipher_async_req_handle(struct crypto_async_request *async_req)
dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
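+ /* Lock the BAM pipe for this request; dropped in qce_skcipher_done() */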
+ qce_bam_lock(qce);
+
rctx->src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (diff_dst)
rctx->dst_nents = sg_nents_for_len(req->dst, req->cryptlen);