@@ -258,21 +258,20 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
 	total = req->cryptlen;
 	for_each_sg(req->src, s, src_sg_len, i) {
-		src_list[i].phy_addr = sg_dma_address(s);
+		u32 phy_addr = sg_dma_address(s);
+		u32 len = sg_dma_len(s);
 
-		if (total > sg_dma_len(s)) {
-			src_list[i].len = sg_dma_len(s);
-			total -= src_list[i].len;
-
-		} else {
+		if (total > len)
+			total -= len;
+		else {
 			/* last sg list */
-			src_list[i].len = total;
-			src_list[i].len |= BIT(31);
+			len = total;
+			len |= BIT(31);
 			total = 0;
 		}
-		src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr);
-		src_list[i].len = cpu_to_le32(src_list[i].len);
+		src_list[i].phy_addr = cpu_to_le32(phy_addr);
+		src_list[i].len = cpu_to_le32(len);
 	}
 
 	if (total != 0) {
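
For context, the warnings fixed in these hunks come from the descriptor fields being __le32 while the old code stored CPU-order values into them and then byte-swapped them in place. A minimal sketch of the before/after pattern; the two-word descriptor mirrors the driver's aspeed_sg_list, but the struct and helper names here are illustrative, not the driver's code:

#include <linux/types.h>
#include <asm/byteorder.h>

struct sg_desc {
	__le32 phy_addr;	/* hardware reads these in little-endian */
	__le32 len;
};

/* Old pattern: store in CPU order, then convert in place. Sparse warns
 * twice: once for the u32 store into a __le32 field, and once for the
 * __le32 value fed back into cpu_to_le32().
 */
static void fill_desc_old(struct sg_desc *d, u32 addr, u32 len)
{
	d->phy_addr = addr;
	d->phy_addr = cpu_to_le32(d->phy_addr);
	d->len = cpu_to_le32(len);
}

/* New pattern: work in plain u32 locals, convert exactly once on store. */
static void fill_desc_new(struct sg_desc *d, u32 addr, u32 len)
{
	d->phy_addr = cpu_to_le32(addr);
	d->len = cpu_to_le32(len);
}
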
@@ -290,21 +289,20 @@ static int aspeed_sk_start_sg(struct aspeed_hace_dev *hace_dev)
 	total = req->cryptlen;
 	for_each_sg(req->dst, s, dst_sg_len, i) {
-		dst_list[i].phy_addr = sg_dma_address(s);
+		u32 phy_addr = sg_dma_address(s);
+		u32 len = sg_dma_len(s);
 
-		if (total > sg_dma_len(s)) {
-			dst_list[i].len = sg_dma_len(s);
-			total -= dst_list[i].len;
-
-		} else {
+		if (total > len)
+			total -= len;
+		else {
 			/* last sg list */
-			dst_list[i].len = total;
-			dst_list[i].len |= BIT(31);
+			len = total;
+			len |= BIT(31);
 			total = 0;
 		}
-		dst_list[i].phy_addr = cpu_to_le32(dst_list[i].phy_addr);
-		dst_list[i].len = cpu_to_le32(dst_list[i].len);
+		dst_list[i].phy_addr = cpu_to_le32(phy_addr);
+		dst_list[i].len = cpu_to_le32(len);
 	}
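
The BIT(31) tag on the final entry is the end-of-list marker in this descriptor format (the "last sg list" comment in the driver). The same walk, pulled out into a stand-alone helper for clarity; struct seg and fill_desc_list are made-up stand-ins for a mapped scatterlist entry and the loop above, reusing the sg_desc sketch:

#include <linux/bits.h>

struct seg {
	u32 addr;
	u32 len;
};

static void fill_desc_list(struct sg_desc *list, const struct seg *segs,
			   unsigned int nsegs, u32 total)
{
	unsigned int i;

	for (i = 0; i < nsegs && total; i++) {
		u32 len = segs[i].len;

		if (total > len) {
			total -= len;
		} else {
			/* trim the last entry and mark end-of-list */
			len = total;
			len |= BIT(31);
			total = 0;
		}

		list[i].phy_addr = cpu_to_le32(segs[i].addr);
		list[i].len = cpu_to_le32(len);
	}
}
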
@@ -731,7 +729,7 @@ static void aspeed_crypto_cra_exit(struct crypto_skcipher *tfm)
 	crypto_free_skcipher(ctx->fallback_tfm);
 }
 
-struct aspeed_hace_alg aspeed_crypto_algs[] = {
+static struct aspeed_hace_alg aspeed_crypto_algs[] = {
 	{
 		.alg.skcipher = {
 			.min_keysize = AES_MIN_KEY_SIZE,
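
The static additions here and in the later hunks silence sparse's missing-declaration check (run with make C=1 to recheck files being recompiled, or C=2 to recheck everything): these arrays are only referenced from within their own files, so they should have internal linkage. A toy reproduction with made-up names:

int algs_table[4];		/* sparse: symbol 'algs_table' was not
				 * declared. Should it be static?
				 */
static int salgs_table[4];	/* internal linkage: no warning */
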
@@ -1019,7 +1017,7 @@ struct aspeed_hace_alg aspeed_crypto_algs[] = {
 	},
 };
 
-struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
+static struct aspeed_hace_alg aspeed_crypto_algs_g6[] = {
 	{
 		.alg.skcipher = {
 			.ivsize = AES_BLOCK_SIZE,
@@ -208,6 +208,9 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 	}
 
 	if (rctx->bufcnt != 0) {
+		u32 phy_addr;
+		u32 len;
+
 		rctx->buffer_dma_addr = dma_map_single(hace_dev->dev,
 						       rctx->buffer,
 						       rctx->block_size * 2,
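
The new locals feed the descriptor stores in the next hunk. For reference, the mapping they sit next to follows the usual streaming-DMA pattern; a stand-alone sketch under an illustrative name (map_partial_block is not a driver function):

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int map_partial_block(struct device *dev, void *buf, size_t size,
			     dma_addr_t *out)
{
	dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

	/* a streaming mapping must be checked before the address is used */
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	*out = addr;
	return 0;
}
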
@@ -218,36 +221,35 @@ static int aspeed_ahash_dma_prepare_sg(struct aspeed_hace_dev *hace_dev)
 			goto free_rctx_digest;
 		}
 
-		src_list[0].phy_addr = rctx->buffer_dma_addr;
-		src_list[0].len = rctx->bufcnt;
-		length -= src_list[0].len;
+		phy_addr = rctx->buffer_dma_addr;
+		len = rctx->bufcnt;
+		length -= len;
 
 		/* Last sg list */
 		if (length == 0)
-			src_list[0].len |= HASH_SG_LAST_LIST;
+			len |= HASH_SG_LAST_LIST;
 
-		src_list[0].phy_addr = cpu_to_le32(src_list[0].phy_addr);
-		src_list[0].len = cpu_to_le32(src_list[0].len);
+		src_list[0].phy_addr = cpu_to_le32(phy_addr);
+		src_list[0].len = cpu_to_le32(len);
 		src_list++;
 	}
 
 	if (length != 0) {
 		for_each_sg(rctx->src_sg, s, sg_len, i) {
-			src_list[i].phy_addr = sg_dma_address(s);
+			u32 phy_addr = sg_dma_address(s);
+			u32 len = sg_dma_len(s);
 
-			if (length > sg_dma_len(s)) {
-				src_list[i].len = sg_dma_len(s);
-				length -= sg_dma_len(s);
-
-			} else {
+			if (length > len)
+				length -= len;
+			else {
 				/* Last sg list */
-				src_list[i].len = length;
-				src_list[i].len |= HASH_SG_LAST_LIST;
+				len = length;
+				len |= HASH_SG_LAST_LIST;
 				length = 0;
 			}
-			src_list[i].phy_addr = cpu_to_le32(src_list[i].phy_addr);
-			src_list[i].len = cpu_to_le32(src_list[i].len);
+			src_list[i].phy_addr = cpu_to_le32(phy_addr);
+			src_list[i].len = cpu_to_le32(len);
 		}
 	}
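
A worked example for the loop above: with bufcnt == 0 and two mapped 4096-byte segments covering a 6144-byte request, iteration 0 takes the first branch (6144 > 4096) and emits len 4096, leaving length = 2048; iteration 1 takes the else branch and emits len = 2048 | HASH_SG_LAST_LIST, leaving length = 0.
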
@@ -913,7 +915,7 @@ static int aspeed_sham_import(struct ahash_request *req, const void *in)
 	return 0;
 }
 
-struct aspeed_hace_alg aspeed_ahash_algs[] = {
+static struct aspeed_hace_alg aspeed_ahash_algs[] = {
 	{
 		.alg.ahash = {
 			.init = aspeed_sham_init,
@@ -1099,7 +1101,7 @@ struct aspeed_hace_alg aspeed_ahash_algs[] = {
 	},
 };
 
-struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
+static struct aspeed_hace_alg aspeed_ahash_algs_g6[] = {
 	{
 		.alg.ahash = {
 			.init = aspeed_sham_init,
This patch fixes a bunch of bitwise endianness warnings reported by
sparse, as well as two missing static modifiers.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>