From patchwork Tue Sep 29 17:44:23 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Rohit Maheshwari X-Patchwork-Id: 289377 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-12.8 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS, INCLUDES_PATCH, MAILING_LIST_MULTI, SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id E2E44C4727C for ; Tue, 29 Sep 2020 17:44:43 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 99647208B8 for ; Tue, 29 Sep 2020 17:44:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1730185AbgI2Rom (ORCPT ); Tue, 29 Sep 2020 13:44:42 -0400 Received: from stargate.chelsio.com ([12.32.117.8]:28822 "EHLO stargate.chelsio.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730165AbgI2Rol (ORCPT ); Tue, 29 Sep 2020 13:44:41 -0400 Received: from localhost.localdomain (redhouse.blr.asicdesigners.com [10.193.185.57]) by stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id 08THiVJw024825; Tue, 29 Sep 2020 10:44:35 -0700 From: Rohit Maheshwari To: kuba@kernel.org, netdev@vger.kernel.org, davem@davemloft.net Cc: secdev@chelsio.com, Rohit Maheshwari Subject: [net-next v3 1/3] ch_ktls: Issue if connection offload fails Date: Tue, 29 Sep 2020 23:14:23 +0530 Message-Id: <20200929174425.12256-2-rohitm@chelsio.com> X-Mailer: git-send-email 2.18.1 In-Reply-To: <20200929174425.12256-1-rohitm@chelsio.com> References: <20200929174425.12256-1-rohitm@chelsio.com> Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Since driver first return success to tls_dev_add, if req to HW is successful, but later if HW returns failure, that connection traffic fails permanently and connection status remains unknown to stack. v1->v2: - removed conn_up from all places. v2->v3: - Corrected timeout handling. Fixes: 34aba2c45024 ("cxgb4/chcr : Register to tls add and del callback") Signed-off-by: Rohit Maheshwari --- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 283 +++++++++--------- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.h | 17 +- 2 files changed, 153 insertions(+), 147 deletions(-) diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index 4609f1f78426..c8db78b6775e 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -125,60 +125,6 @@ static int chcr_ktls_save_keys(struct chcr_ktls_info *tx_info, return ret; } -static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info, - int new_state) -{ - /* This function can be called from both rx (interrupt context) and tx - * queue contexts. - */ - spin_lock_bh(&tx_info->lock); - switch (tx_info->connection_state) { - case KTLS_CONN_CLOSED: - tx_info->connection_state = new_state; - break; - - case KTLS_CONN_ACT_OPEN_REQ: - /* only go forward if state is greater than current state. 
*/ - if (new_state <= tx_info->connection_state) - break; - /* update to the next state and also initialize TCB */ - tx_info->connection_state = new_state; - fallthrough; - case KTLS_CONN_ACT_OPEN_RPL: - /* if we are stuck in this state, means tcb init might not - * received by HW, try sending it again. - */ - if (!chcr_init_tcb_fields(tx_info)) - tx_info->connection_state = KTLS_CONN_SET_TCB_REQ; - break; - - case KTLS_CONN_SET_TCB_REQ: - /* only go forward if state is greater than current state. */ - if (new_state <= tx_info->connection_state) - break; - /* update to the next state and check if l2t_state is valid */ - tx_info->connection_state = new_state; - fallthrough; - case KTLS_CONN_SET_TCB_RPL: - /* Check if l2t state is valid, then move to ready state. */ - if (cxgb4_check_l2t_valid(tx_info->l2te)) { - tx_info->connection_state = KTLS_CONN_TX_READY; - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ctx); - } - break; - - case KTLS_CONN_TX_READY: - /* nothing to be done here */ - break; - - default: - pr_err("unknown KTLS connection state\n"); - break; - } - spin_unlock_bh(&tx_info->lock); - - return tx_info->connection_state; -} /* * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection. * @sk - tcp socket. @@ -298,27 +244,17 @@ static int chcr_setup_connection(struct sock *sk, return -EINVAL; tx_info->atid = atid; - tx_info->ip_family = sk->sk_family; - if (sk->sk_family == AF_INET) { - tx_info->ip_family = AF_INET; + if (tx_info->ip_family == AF_INET) { ret = chcr_ktls_act_open_req(sk, tx_info, atid); #if IS_ENABLED(CONFIG_IPV6) } else { - if (!sk->sk_ipv6only && - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { - tx_info->ip_family = AF_INET; - ret = chcr_ktls_act_open_req(sk, tx_info, atid); - } else { - tx_info->ip_family = AF_INET6; - ret = cxgb4_clip_get(tx_info->netdev, - (const u32 *) - &sk->sk_v6_rcv_saddr.s6_addr, - 1); - if (ret) - goto out; - ret = chcr_ktls_act_open_req6(sk, tx_info, atid); - } + ret = cxgb4_clip_get(tx_info->netdev, (const u32 *) + &sk->sk_v6_rcv_saddr, + 1); + if (ret) + return ret; + ret = chcr_ktls_act_open_req6(sk, tx_info, atid); #endif } @@ -326,16 +262,21 @@ static int chcr_setup_connection(struct sock *sk, * success, if any other return type clear atid and return that failure. 
*/ if (ret) { - if (ret == NET_XMIT_CN) + if (ret == NET_XMIT_CN) { ret = 0; - else + } else { +#if IS_ENABLED(CONFIG_IPV6) + /* clear clip entry */ + if (tx_info->ip_family == AF_INET6) + cxgb4_clip_release(tx_info->netdev, + (const u32 *) + &sk->sk_v6_rcv_saddr, + 1); +#endif cxgb4_free_atid(t, atid); - goto out; + } } - /* update the connection state */ - chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ); -out: return ret; } @@ -396,15 +337,9 @@ static void chcr_ktls_dev_del(struct net_device *netdev, struct chcr_ktls_ofld_ctx_tx *tx_ctx = chcr_get_ktls_tx_context(tls_ctx); struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; - struct sock *sk; if (!tx_info) return; - sk = tx_info->sk; - - spin_lock(&tx_info->lock); - tx_info->connection_state = KTLS_CONN_CLOSED; - spin_unlock(&tx_info->lock); /* clear l2t entry */ if (tx_info->l2te) @@ -413,8 +348,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev, #if IS_ENABLED(CONFIG_IPV6) /* clear clip entry */ if (tx_info->ip_family == AF_INET6) - cxgb4_clip_release(netdev, - (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8, + cxgb4_clip_release(netdev, (const u32 *) + &tx_info->sk->sk_v6_rcv_saddr, 1); #endif @@ -461,30 +396,22 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, pi = netdev_priv(netdev); adap = pi->adapter; + + atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open); if (direction == TLS_OFFLOAD_CTX_DIR_RX) { pr_err("not expecting for RX direction\n"); - ret = -EINVAL; goto out; } - if (tx_ctx->chcr_info) { - ret = -EINVAL; + + if (tx_ctx->chcr_info) goto out; - } tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL); - if (!tx_info) { - ret = -ENOMEM; + if (!tx_info) goto out; - } - - spin_lock_init(&tx_info->lock); - - /* clear connection state */ - spin_lock(&tx_info->lock); - tx_info->connection_state = KTLS_CONN_CLOSED; - spin_unlock(&tx_info->lock); tx_info->sk = sk; + spin_lock_init(&tx_info->lock); /* initialize tid and atid to -1, 0 is a also a valid id. 
*/ tx_info->tid = -1; tx_info->atid = -1; @@ -495,10 +422,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, tx_info->tx_chan = pi->tx_chan; tx_info->smt_idx = pi->smt_idx; tx_info->port_id = pi->port_id; + tx_info->prev_ack = 0; + tx_info->prev_win = 0; tx_info->rx_qid = chcr_get_first_rx_qid(adap); if (unlikely(tx_info->rx_qid < 0)) - goto out2; + goto free_tx_info; tx_info->prev_seq = start_offload_tcp_sn; tx_info->tcp_start_seq_number = start_offload_tcp_sn; @@ -506,18 +435,22 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, /* save crypto keys */ ret = chcr_ktls_save_keys(tx_info, crypto_info, direction); if (ret < 0) - goto out2; + goto free_tx_info; /* get peer ip */ if (sk->sk_family == AF_INET) { memcpy(daaddr, &sk->sk_daddr, 4); + tx_info->ip_family = AF_INET; #if IS_ENABLED(CONFIG_IPV6) } else { if (!sk->sk_ipv6only && - ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) + ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) { memcpy(daaddr, &sk->sk_daddr, 4); - else + tx_info->ip_family = AF_INET; + } else { memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16); + tx_info->ip_family = AF_INET6; + } #endif } @@ -525,13 +458,13 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, dst = sk_dst_get(sk); if (!dst) { pr_err("DST entry not found\n"); - goto out2; + goto free_tx_info; } n = dst_neigh_lookup(dst, daaddr); if (!n || !n->dev) { pr_err("neighbour not found\n"); dst_release(dst); - goto out2; + goto free_tx_info; } tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0); @@ -540,31 +473,86 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, if (!tx_info->l2te) { pr_err("l2t entry not found\n"); - goto out2; + goto free_tx_info; } - tx_ctx->chcr_info = tx_info; + /* Driver shouldn't be removed until any single connection exists */ + if (!try_module_get(THIS_MODULE)) + goto free_l2t; + init_completion(&tx_info->completion); /* create a filter and call cxgb4_l2t_send to send the packet out, which * will take care of updating l2t entry in hw if not already done. */ - ret = chcr_setup_connection(sk, tx_info); - if (ret) - goto out2; + tx_info->open_state = CH_KTLS_OPEN_PENDING; - /* Driver shouldn't be removed until any single connection exists */ - if (!try_module_get(THIS_MODULE)) { - ret = -EINVAL; - goto out2; + if (chcr_setup_connection(sk, tx_info)) + goto put_module; + + /* Wait for reply */ + wait_for_completion_timeout(&tx_info->completion, 30 * HZ); + spin_lock_bh(&tx_info->lock); + if (tx_info->open_state) { + /* need to wait for hw response, can't free tx_info yet. */ + if (tx_info->open_state == CH_KTLS_OPEN_PENDING) + tx_info->pending_close = true; + /* free the lock after the cleanup */ + goto put_module; } + spin_unlock_bh(&tx_info->lock); + + /* initialize tcb */ + reinit_completion(&tx_info->completion); + /* mark it pending for hw response */ + tx_info->open_state = CH_KTLS_OPEN_PENDING; + + if (chcr_init_tcb_fields(tx_info)) + goto free_tid; + + /* Wait for reply */ + wait_for_completion_timeout(&tx_info->completion, 30 * HZ); + spin_lock_bh(&tx_info->lock); + if (tx_info->open_state) { + /* need to wait for hw response, can't free tx_info yet. 
*/ + tx_info->pending_close = true; + /* free the lock after cleanup */ + goto free_tid; + } + spin_unlock_bh(&tx_info->lock); + + if (!cxgb4_check_l2t_valid(tx_info->l2te)) + goto free_tid; + + atomic64_inc(&adap->ch_ktls_stats.ktls_tx_ctx); + tx_ctx->chcr_info = tx_info; - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open); return 0; -out2: - kvfree(tx_info); + +free_tid: + chcr_ktls_mark_tcb_close(tx_info); +#if IS_ENABLED(CONFIG_IPV6) + /* clear clip entry */ + if (tx_info->ip_family == AF_INET6) + cxgb4_clip_release(netdev, (const u32 *) + &sk->sk_v6_rcv_saddr, + 1); +#endif + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, + tx_info->tid, tx_info->ip_family); + +put_module: + /* release module refcount */ + module_put(THIS_MODULE); +free_l2t: + cxgb4_l2t_release(tx_info->l2te); +free_tx_info: + if (tx_info->pending_close) + spin_unlock_bh(&tx_info->lock); + else + kvfree(tx_info); out: atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail); - return ret; + return -1; } /* @@ -627,20 +615,39 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap, tx_info = lookup_atid(t, atid); if (!tx_info || tx_info->atid != atid) { - pr_err("tx_info or atid is not correct\n"); + pr_err("%s: incorrect tx_info or atid\n", __func__); return -1; } + cxgb4_free_atid(t, atid); + tx_info->atid = -1; + + spin_lock(&tx_info->lock); + /* HW response is very close, finish pending cleanup */ + if (tx_info->pending_close) { + spin_unlock(&tx_info->lock); + if (!status) { + /* it's a late success, tcb status is establised, + * mark it close. + */ + chcr_ktls_mark_tcb_close(tx_info); + cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan, + tid, tx_info->ip_family); + } + kvfree(tx_info); + return 0; + } + if (!status) { tx_info->tid = tid; cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family); - - cxgb4_free_atid(t, atid); - tx_info->atid = -1; - /* update the connection state */ - chcr_ktls_update_connection_state(tx_info, - KTLS_CONN_ACT_OPEN_RPL); + tx_info->open_state = CH_KTLS_OPEN_SUCCESS; + } else { + tx_info->open_state = CH_KTLS_OPEN_FAILURE; } + spin_unlock(&tx_info->lock); + + complete(&tx_info->completion); return 0; } @@ -658,12 +665,22 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input) t = &adap->tids; tx_info = lookup_tid(t, tid); + if (!tx_info || tx_info->tid != tid) { - pr_err("tx_info or atid is not correct\n"); + pr_err("%s: incorrect tx_info or tid\n", __func__); return -1; } - /* update the connection state */ - chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL); + + spin_lock(&tx_info->lock); + if (tx_info->pending_close) { + spin_unlock(&tx_info->lock); + kvfree(tx_info); + return 0; + } + tx_info->open_state = false; + spin_unlock(&tx_info->lock); + + complete(&tx_info->completion); return 0; } @@ -1845,7 +1862,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) u32 tls_end_offset, tcp_seq; struct tls_context *tls_ctx; struct sk_buff *local_skb; - int new_connection_state; struct sge_eth_txq *q; struct adapter *adap; unsigned long flags; @@ -1868,15 +1884,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(!tx_info)) goto out; - /* check the connection state, we don't need to pass new connection - * state, state machine will check and update the new state if it is - * stuck due to responses not received from HW. - * Start the tx handling only if state is KTLS_CONN_TX_READY. 
- */ - new_connection_state = chcr_ktls_update_connection_state(tx_info, 0); - if (new_connection_state != KTLS_CONN_TX_READY) - goto out; - /* don't touch the original skb, make a new skb to extract each records * and send them separately. */ diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h index 4ae6ae38c406..c1651b1431a0 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.h @@ -27,22 +27,20 @@ #define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\ sizeof(struct cpl_tx_sec_pdu)) -enum chcr_ktls_conn_state { - KTLS_CONN_CLOSED, - KTLS_CONN_ACT_OPEN_REQ, - KTLS_CONN_ACT_OPEN_RPL, - KTLS_CONN_SET_TCB_REQ, - KTLS_CONN_SET_TCB_RPL, - KTLS_CONN_TX_READY, +enum ch_ktls_open_state { + CH_KTLS_OPEN_SUCCESS = 0, + CH_KTLS_OPEN_PENDING = 1, + CH_KTLS_OPEN_FAILURE = 2, }; struct chcr_ktls_info { struct sock *sk; - spinlock_t lock; /* state machine lock */ + spinlock_t lock; /* lock for pending_close */ struct ktls_key_ctx key_ctx; struct adapter *adap; struct l2t_entry *l2te; struct net_device *netdev; + struct completion completion; u64 iv; u64 record_no; int tid; @@ -58,13 +56,14 @@ struct chcr_ktls_info { u32 tcp_start_seq_number; u32 scmd0_short_seqno_numivs; u32 scmd0_short_ivgen_hdrlen; - enum chcr_ktls_conn_state connection_state; u16 prev_win; u8 tx_chan; u8 smt_idx; u8 port_id; u8 ip_family; u8 first_qset; + enum ch_ktls_open_state open_state; + bool pending_close; }; struct chcr_ktls_ofld_ctx_tx { From patchwork Tue Sep 29 17:44:24 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Rohit Maheshwari X-Patchwork-Id: 259918 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-12.8 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS, INCLUDES_PATCH, MAILING_LIST_MULTI, SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 9AE06C47420 for ; Tue, 29 Sep 2020 17:44:45 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 598FC208B8 for ; Tue, 29 Sep 2020 17:44:45 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1730218AbgI2Roo (ORCPT ); Tue, 29 Sep 2020 13:44:44 -0400 Received: from stargate.chelsio.com ([12.32.117.8]:31337 "EHLO stargate.chelsio.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730165AbgI2Ron (ORCPT ); Tue, 29 Sep 2020 13:44:43 -0400 Received: from localhost.localdomain (redhouse.blr.asicdesigners.com [10.193.185.57]) by stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id 08THiVJx024825; Tue, 29 Sep 2020 10:44:38 -0700 From: Rohit Maheshwari To: kuba@kernel.org, netdev@vger.kernel.org, davem@davemloft.net Cc: secdev@chelsio.com, Rohit Maheshwari Subject: [net-next v3 2/3] cxgb4: Avoid log flood Date: Tue, 29 Sep 2020 23:14:24 +0530 Message-Id: <20200929174425.12256-3-rohitm@chelsio.com> X-Mailer: git-send-email 2.18.1 In-Reply-To: <20200929174425.12256-1-rohitm@chelsio.com> References: <20200929174425.12256-1-rohitm@chelsio.com> Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org Changing these logs to dynamic debugs. 
If issue is seen, these logs can be enabled at run time. Signed-off-by: Rohit Maheshwari --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index b154190e1ee2..743af9e654aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -690,8 +690,8 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable) * ULD is/are already active, return failure. */ if (cxgb4_uld_in_use(adap)) { - dev_warn(adap->pdev_dev, - "ULD connections (tid/stid) active. Can't enable kTLS\n"); + dev_dbg(adap->pdev_dev, + "ULD connections (tid/stid) active. Can't enable kTLS\n"); return -EINVAL; } ret = t4_set_params(adap, adap->mbox, adap->pf, @@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable) if (ret) return ret; refcount_set(&adap->chcr_ktls.ktls_refcount, 1); - pr_info("kTLS has been enabled. Restrictions placed on ULD support\n"); + pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n"); } else { /* ktls settings already up, just increment refcount. */ refcount_inc(&adap->chcr_ktls.ktls_refcount); @@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable) 0, 1, ¶ms, ¶ms); if (ret) return ret; - pr_info("kTLS is disabled. Restrictions on ULD support removed\n"); + pr_debug("kTLS is disabled. Restrictions on ULD support removed\n"); } } From patchwork Tue Sep 29 17:44:25 2020 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Rohit Maheshwari X-Patchwork-Id: 289376 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-12.8 required=3.0 tests=BAYES_00, HEADER_FROM_DIFFERENT_DOMAINS, INCLUDES_PATCH, MAILING_LIST_MULTI, SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,URIBL_BLOCKED,USER_AGENT_GIT autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id D4B88C4727C for ; Tue, 29 Sep 2020 17:44:49 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [23.128.96.18]) by mail.kernel.org (Postfix) with ESMTP id 85116208B8 for ; Tue, 29 Sep 2020 17:44:49 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1730239AbgI2Ros (ORCPT ); Tue, 29 Sep 2020 13:44:48 -0400 Received: from stargate.chelsio.com ([12.32.117.8]:39004 "EHLO stargate.chelsio.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1730165AbgI2Roq (ORCPT ); Tue, 29 Sep 2020 13:44:46 -0400 Received: from localhost.localdomain (redhouse.blr.asicdesigners.com [10.193.185.57]) by stargate.chelsio.com (8.13.8/8.13.8) with ESMTP id 08THiVK0024825; Tue, 29 Sep 2020 10:44:41 -0700 From: Rohit Maheshwari To: kuba@kernel.org, netdev@vger.kernel.org, davem@davemloft.net Cc: secdev@chelsio.com, Rohit Maheshwari Subject: [net-next v3 3/3] cxgb4/ch_ktls: ktls stats are added at port level Date: Tue, 29 Sep 2020 23:14:25 +0530 Message-Id: <20200929174425.12256-4-rohitm@chelsio.com> X-Mailer: git-send-email 2.18.1 In-Reply-To: <20200929174425.12256-1-rohitm@chelsio.com> References: <20200929174425.12256-1-rohitm@chelsio.com> Precedence: bulk List-ID: X-Mailing-List: netdev@vger.kernel.org All the ktls stats were at adapter level, but now changing it to port level. 
Fixes: 62370a4f346d ("cxgb4/chcr: Add ipv6 support and statistics") Signed-off-by: Rohit Maheshwari --- .../ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 35 ++++++------- .../ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 50 +++++++++++++------ .../net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 21 +++++--- .../chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 28 +++++++---- 4 files changed, 80 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index 5491a41fd1be..0273f40b85f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo); static int chcr_stats_show(struct seq_file *seq, void *v) { +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) + struct ch_ktls_port_stats_debug *ktls_port; + int i = 0; +#endif struct adapter *adap = seq->private; seq_puts(seq, "Chelsio Crypto Accelerator Stats \n"); @@ -3557,18 +3561,6 @@ static int chcr_stats_show(struct seq_file *seq, void *v) seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n"); seq_printf(seq, "Tx TLS offload refcount: %20u\n", refcount_read(&adap->chcr_ktls.ktls_refcount)); - seq_printf(seq, "Tx HW offload contexts added: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_ctx)); - seq_printf(seq, "Tx connection created: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_open)); - seq_printf(seq, "Tx connection failed: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_fail)); - seq_printf(seq, "Tx connection closed: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_close)); - seq_printf(seq, "Packets passed for encryption : %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_packets)); - seq_printf(seq, "Bytes passed for encryption : %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_bytes)); seq_printf(seq, "Tx records send: %20llu\n", atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records)); seq_printf(seq, "Tx partial start of records: %20llu\n", @@ -3581,14 +3573,17 @@ static int chcr_stats_show(struct seq_file *seq, void *v) atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts)); seq_printf(seq, "TX trim pkts : %20llu\n", atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts)); - seq_printf(seq, "Tx out of order packets: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_ooo)); - seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_skip_no_sync_data)); - seq_printf(seq, "Tx drop not synced packets: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_no_sync_data)); - seq_printf(seq, "Tx drop bypass req: %20llu\n", - atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_bypass_req)); + while (i < MAX_NPORTS) { + ktls_port = &adap->ch_ktls_stats.ktls_port[i]; + seq_printf(seq, "Port %d\n", i); + seq_printf(seq, "Tx connection created: %20llu\n", + atomic64_read(&ktls_port->ktls_tx_connection_open)); + seq_printf(seq, "Tx connection failed: %20llu\n", + atomic64_read(&ktls_port->ktls_tx_connection_fail)); + seq_printf(seq, "Tx connection closed: %20llu\n", + atomic64_read(&ktls_port->ktls_tx_connection_close)); + i++; + } #endif return 0; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 8c6c9bedcba1..61ea3ec5c3fc 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ 
b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -117,14 +117,6 @@ static const char stats_strings[][ETH_GSTRING_LEN] = { "vlan_insertions ", "gro_packets ", "gro_merged ", -}; - -static char adapter_stats_strings[][ETH_GSTRING_LEN] = { - "db_drop ", - "db_full ", - "db_empty ", - "write_coal_success ", - "write_coal_fail ", #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) "tx_tls_encrypted_packets", "tx_tls_encrypted_bytes ", @@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = { #endif }; +static char adapter_stats_strings[][ETH_GSTRING_LEN] = { + "db_drop ", + "db_full ", + "db_empty ", + "write_coal_success ", + "write_coal_fail ", +}; + static char loopback_stats_strings[][ETH_GSTRING_LEN] = { "-------Loopback----------- ", "octets_ok ", @@ -257,14 +257,6 @@ struct queue_port_stats { u64 vlan_ins; u64 gro_pkts; u64 gro_merged; -}; - -struct adapter_stats { - u64 db_drop; - u64 db_full; - u64 db_empty; - u64 wc_success; - u64 wc_fail; #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) u64 tx_tls_encrypted_packets; u64 tx_tls_encrypted_bytes; @@ -276,12 +268,23 @@ struct adapter_stats { #endif }; +struct adapter_stats { + u64 db_drop; + u64 db_full; + u64 db_empty; + u64 wc_success; + u64 wc_fail; +}; + static void collect_sge_port_stats(const struct adapter *adap, const struct port_info *p, struct queue_port_stats *s) { const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset]; const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset]; +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) + const struct ch_ktls_port_stats_debug *ktls_stats; +#endif struct sge_eohw_txq *eohw_tx; unsigned int i; @@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap, s->vlan_ins += eohw_tx->vlan_ins; } } +#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) + ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id]; + s->tx_tls_encrypted_packets = + atomic64_read(&ktls_stats->ktls_tx_encrypted_packets); + s->tx_tls_encrypted_bytes = + atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes); + s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx); + s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo); + s->tx_tls_skip_no_sync_data = + atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data); + s->tx_tls_drop_no_sync_data = + atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data); + s->tx_tls_drop_bypass_req = + atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req); +#endif } static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index ea2fabbdd934..b169776ab484 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h @@ -44,6 +44,7 @@ #include "cxgb4.h" #define MAX_ULD_QSETS 16 +#define MAX_ULD_NPORTS 4 /* CPL message priority levels */ enum { @@ -365,17 +366,10 @@ struct cxgb4_virt_res { /* virtualized HW resources */ }; #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE) -struct ch_ktls_stats_debug { +struct ch_ktls_port_stats_debug { atomic64_t ktls_tx_connection_open; atomic64_t ktls_tx_connection_fail; atomic64_t ktls_tx_connection_close; - atomic64_t ktls_tx_send_records; - atomic64_t ktls_tx_end_pkts; - atomic64_t ktls_tx_start_pkts; - atomic64_t ktls_tx_middle_pkts; - atomic64_t ktls_tx_retransmit_pkts; - atomic64_t ktls_tx_complete_pkts; - atomic64_t ktls_tx_trimmed_pkts; atomic64_t ktls_tx_encrypted_packets; atomic64_t ktls_tx_encrypted_bytes; atomic64_t ktls_tx_ctx; @@ -384,6 +378,17 @@ 
struct ch_ktls_stats_debug { atomic64_t ktls_tx_drop_no_sync_data; atomic64_t ktls_tx_drop_bypass_req; }; + +struct ch_ktls_stats_debug { + struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS]; + atomic64_t ktls_tx_send_records; + atomic64_t ktls_tx_end_pkts; + atomic64_t ktls_tx_start_pkts; + atomic64_t ktls_tx_middle_pkts; + atomic64_t ktls_tx_retransmit_pkts; + atomic64_t ktls_tx_complete_pkts; + atomic64_t ktls_tx_trimmed_pkts; +}; #endif struct chcr_stats_debug { diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c index c8db78b6775e..5195f692f14d 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c @@ -337,6 +337,7 @@ static void chcr_ktls_dev_del(struct net_device *netdev, struct chcr_ktls_ofld_ctx_tx *tx_ctx = chcr_get_ktls_tx_context(tls_ctx); struct chcr_ktls_info *tx_info = tx_ctx->chcr_info; + struct ch_ktls_port_stats_debug *port_stats; if (!tx_info) return; @@ -361,7 +362,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev, tx_info->tid, tx_info->ip_family); } - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_connection_close); + port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; + atomic64_inc(&port_stats->ktls_tx_connection_close); kvfree(tx_info); tx_ctx->chcr_info = NULL; /* release module refcount */ @@ -383,6 +385,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, u32 start_offload_tcp_sn) { struct tls_context *tls_ctx = tls_get_ctx(sk); + struct ch_ktls_port_stats_debug *port_stats; struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct chcr_ktls_info *tx_info; struct dst_entry *dst; @@ -396,8 +399,9 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, pi = netdev_priv(netdev); adap = pi->adapter; + port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id]; + atomic64_inc(&port_stats->ktls_tx_connection_open); - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open); if (direction == TLS_OFFLOAD_CTX_DIR_RX) { pr_err("not expecting for RX direction\n"); goto out; @@ -523,7 +527,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, if (!cxgb4_check_l2t_valid(tx_info->l2te)) goto free_tid; - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_ctx); + atomic64_inc(&port_stats->ktls_tx_ctx); tx_ctx->chcr_info = tx_info; return 0; @@ -551,7 +555,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk, else kvfree(tx_info); out: - atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail); + atomic64_inc(&port_stats->ktls_tx_connection_fail); return -1; } @@ -782,6 +786,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info, u64 tcp_ack, u64 tcp_win) { bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0)); + struct ch_ktls_port_stats_debug *port_stats; u32 len, cpl = 0, ndesc, wr_len; struct fw_ulptx_wr *wr; int credits; @@ -815,12 +820,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info, /* reset snd una if it's a re-transmit pkt */ if (tcp_seq != tx_info->prev_seq) { /* reset snd_una */ + port_stats = + &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id]; pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos, TCB_SND_UNA_RAW_W, TCB_SND_UNA_RAW_V (TCB_SND_UNA_RAW_M), TCB_SND_UNA_RAW_V(0), 0); - atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ooo); + atomic64_inc(&port_stats->ktls_tx_ooo); cpl++; } /* update ack */ 
@@ -1853,6 +1860,7 @@ static int chcr_short_record_handler(struct chcr_ktls_info *tx_info, /* nic tls TX handler */ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) { + struct ch_ktls_port_stats_debug *port_stats; struct chcr_ktls_ofld_ctx_tx *tx_ctx; struct ch_ktls_stats_debug *stats; struct tcphdr *th = tcp_hdr(skb); @@ -1894,6 +1902,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) adap = tx_info->adap; stats = &adap->ch_ktls_stats; + port_stats = &stats->ktls_port[tx_info->port_id]; qidx = skb->queue_mapping; q = &adap->sge.ethtxq[qidx + tx_info->first_qset]; @@ -1939,13 +1948,13 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) */ if (unlikely(!record)) { spin_unlock_irqrestore(&tx_ctx->base.lock, flags); - atomic64_inc(&stats->ktls_tx_drop_no_sync_data); + atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data); goto out; } if (unlikely(tls_record_is_start_marker(record))) { spin_unlock_irqrestore(&tx_ctx->base.lock, flags); - atomic64_inc(&stats->ktls_tx_skip_no_sync_data); + atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data); goto out; } @@ -2016,9 +2025,8 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev) } while (data_len > 0); tx_info->prev_seq = ntohl(th->seq) + skb->data_len; - - atomic64_inc(&stats->ktls_tx_encrypted_packets); - atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes); + atomic64_inc(&port_stats->ktls_tx_encrypted_packets); + atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes); /* tcp finish is set, send a separate tcp msg including all the options * as well.
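
Patch 1 replaces the old per-connection state machine with a completion-based handshake: the control path sends the act-open (and later SET_TCB) request, sleeps on a completion for up to 30 * HZ, and uses open_state / pending_close to decide whether the control path or the late-arriving CPL reply handler frees the context. The sketch below is a minimal stand-alone illustration of that pattern, not code taken from the patch: struct offload_ctx, send_hw_request() and the enum names are placeholders that mirror chcr_ktls_info, the driver's request path and CH_KTLS_OPEN_{SUCCESS,PENDING,FAILURE}; only the locking and completion calls are the standard kernel primitives the driver itself uses.

#include <linux/completion.h>
#include <linux/spinlock.h>
#include <linux/mm.h>		/* kvfree() */
#include <linux/jiffies.h>	/* HZ */
#include <linux/errno.h>

enum open_state {		/* mirrors CH_KTLS_OPEN_{SUCCESS,PENDING,FAILURE} */
	OPEN_SUCCESS = 0,
	OPEN_PENDING = 1,
	OPEN_FAILURE = 2,
};

struct offload_ctx {		/* stand-in for struct chcr_ktls_info */
	spinlock_t lock;		/* protects open_state / pending_close */
	struct completion completion;	/* signalled by the reply handler */
	enum open_state open_state;
	bool pending_close;		/* control path timed out and gave up */
};

int send_hw_request(struct offload_ctx *ctx);	/* hypothetical request helper */

/* Control path (process context): send the request, then wait for the reply. */
static int offload_open(struct offload_ctx *ctx)
{
	spin_lock_init(&ctx->lock);
	init_completion(&ctx->completion);
	ctx->open_state = OPEN_PENDING;

	if (send_hw_request(ctx))
		return -EIO;

	wait_for_completion_timeout(&ctx->completion, 30 * HZ);

	spin_lock_bh(&ctx->lock);
	if (ctx->open_state == OPEN_PENDING) {
		/* No reply yet: hand ownership to the (late) reply handler. */
		ctx->pending_close = true;
		spin_unlock_bh(&ctx->lock);
		return -ETIMEDOUT;
	}
	if (ctx->open_state == OPEN_FAILURE) {
		/* Hardware replied with an error; caller may free ctx now. */
		spin_unlock_bh(&ctx->lock);
		return -EIO;
	}
	spin_unlock_bh(&ctx->lock);
	return 0;			/* OPEN_SUCCESS */
}

/* Reply handler (softirq context), driven by the CPL message from hardware. */
static void offload_open_reply(struct offload_ctx *ctx, int status)
{
	spin_lock(&ctx->lock);
	if (ctx->pending_close) {
		/* Control path already gave up; the late reply frees the context. */
		spin_unlock(&ctx->lock);
		kvfree(ctx);
		return;
	}
	ctx->open_state = status ? OPEN_FAILURE : OPEN_SUCCESS;
	spin_unlock(&ctx->lock);
	complete(&ctx->completion);
}

In the driver the same wait/complete round trip happens twice, once for the act-open reply and once for the SET_TCB reply, and the chcr_ktls_cpl_act_open_rpl() / chcr_ktls_cpl_set_tcb_rpl() handlers additionally manage the atid/tid and clip/l2t cleanup before signalling the completion.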