diff mbox series

[05/12] scsi: iscsi: Run recv path from workqueue

Message ID 20220308002747.122682-6-michael.christie@oracle.com
State Superseded
Headers show
Series misc iscsi patches | expand

Commit Message

Mike Christie March 8, 2022, 12:27 a.m. UTC
We don't always want to run the recv path from the network softirq
because when we have to have multiple sessions sharing the same CPUs, some
sessions can eat up the napi softirq budget and affect other sessions or
users. This patch allows us to queue the recv handling to the iscsi
workqueue so we can have the scheduler/wq code try to balance the work and
CPU use across all sessions' worker threads.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
---
 drivers/scsi/iscsi_tcp.c | 62 +++++++++++++++++++++++++++++++---------
 drivers/scsi/iscsi_tcp.h |  2 ++
 2 files changed, 51 insertions(+), 13 deletions(-)

Comments

Lee Duncan March 18, 2022, 4:45 p.m. UTC | #1
On 3/7/22 16:27, Mike Christie wrote:
> We don't always want to run the recv path from the network softirq
> because when we have to have multiple sessions sharing the same CPUs some
> sessions can eat up the napi softirq budget and affect other sessions or
> users. This patch allows us to queue the recv handling to the iscsi
> workqueue so we can have the scheduler/wq code try to balance the work and
> CPU use across all sessions's  worker threads.
> 
> Signed-off-by: Mike Christie <michael.christie@oracle.com>
> ---
>   drivers/scsi/iscsi_tcp.c | 62 +++++++++++++++++++++++++++++++---------
>   drivers/scsi/iscsi_tcp.h |  2 ++
>   2 files changed, 51 insertions(+), 13 deletions(-)
> 
> diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
> index f274a86d2ec0..261599938fc9 100644
> --- a/drivers/scsi/iscsi_tcp.c
> +++ b/drivers/scsi/iscsi_tcp.c
> @@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
>   static unsigned int iscsi_max_lun = ~0;
>   module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
>   
> +static bool iscsi_use_recv_wq;
> +module_param_named(use_recv_wq, iscsi_use_recv_wq, bool, 0644);
> +MODULE_PARM_DESC(use_recv_wq, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");

I'm just curious why you chose to make this a module parameter, leaving 
the current default.

> +
>   static int iscsi_sw_tcp_dbg;
>   module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
>   		   S_IRUGO | S_IWUSR);
> @@ -122,20 +126,13 @@ static inline int iscsi_sw_sk_state_check(struct sock *sk)
>   	return 0;
>   }
>   
> -static void iscsi_sw_tcp_data_ready(struct sock *sk)
> +static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
>   {
> -	struct iscsi_conn *conn;
> -	struct iscsi_tcp_conn *tcp_conn;
> +	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
> +	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
> +	struct sock *sk = tcp_sw_conn->sock->sk;
>   	read_descriptor_t rd_desc;
>   
> -	read_lock_bh(&sk->sk_callback_lock);
> -	conn = sk->sk_user_data;
> -	if (!conn) {
> -		read_unlock_bh(&sk->sk_callback_lock);
> -		return;
> -	}
> -	tcp_conn = conn->dd_data;
> -
>   	/*
>   	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
>   	 * We set count to 1 because we want the network layer to
> @@ -144,13 +141,48 @@ static void iscsi_sw_tcp_data_ready(struct sock *sk)
>   	 */
>   	rd_desc.arg.data = conn;
>   	rd_desc.count = 1;
> -	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
>   
> -	iscsi_sw_sk_state_check(sk);
> +	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
>   
>   	/* If we had to (atomically) map a highmem page,
>   	 * unmap it now. */
>   	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
> +
> +	iscsi_sw_sk_state_check(sk);
> +}
> +
> +static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
> +{
> +	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
> +					       recvwork);
> +	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
> +	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
> +	struct sock *sk = tcp_sw_conn->sock->sk;
> +
> +	lock_sock(sk);
> +	iscsi_sw_tcp_recv_data(conn);
> +	release_sock(sk);
> +}
> +
> +static void iscsi_sw_tcp_data_ready(struct sock *sk)
> +{
> +	struct iscsi_sw_tcp_conn *tcp_sw_conn;
> +	struct iscsi_tcp_conn *tcp_conn;
> +	struct iscsi_conn *conn;
> +
> +	read_lock_bh(&sk->sk_callback_lock);
> +	conn = sk->sk_user_data;
> +	if (!conn) {
> +		read_unlock_bh(&sk->sk_callback_lock);
> +		return;
> +	}
> +	tcp_conn = conn->dd_data;
> +	tcp_sw_conn = tcp_conn->dd_data;
> +
> +	if (tcp_sw_conn->queue_recv)
> +		iscsi_conn_queue_recv(conn);
> +	else
> +		iscsi_sw_tcp_recv_data(conn);
>   	read_unlock_bh(&sk->sk_callback_lock);
>   }
>   
> @@ -557,6 +589,8 @@ iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
>   	conn = cls_conn->dd_data;
>   	tcp_conn = conn->dd_data;
>   	tcp_sw_conn = tcp_conn->dd_data;
> +	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
> +	tcp_sw_conn->queue_recv = iscsi_use_recv_wq;
>   
>   	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
>   	if (IS_ERR(tfm))
> @@ -606,6 +640,8 @@ static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
>   	 */
>   	kernel_sock_shutdown(sock, SHUT_RDWR);
>   
> +	iscsi_suspend_rx(conn);
> +
>   	sock_hold(sock->sk);
>   	iscsi_sw_tcp_conn_restore_callbacks(conn);
>   	sock_put(sock->sk);
> diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
> index 791453195099..850a018aefb9 100644
> --- a/drivers/scsi/iscsi_tcp.h
> +++ b/drivers/scsi/iscsi_tcp.h
> @@ -28,6 +28,8 @@ struct iscsi_sw_tcp_send {
>   
>   struct iscsi_sw_tcp_conn {
>   	struct socket		*sock;
> +	struct work_struct	recvwork;
> +	bool			queue_recv;
>   
>   	struct iscsi_sw_tcp_send out;
>   	/* old values for socket callbacks */

Other than my question above, I'm fine with this.

Reviewed-by: Lee Duncan <lduncan@suse.com>
Mike Christie March 18, 2022, 10:11 p.m. UTC | #2
On 3/18/22 11:45 AM, Lee Duncan wrote:
> On 3/7/22 16:27, Mike Christie wrote:
>> We don't always want to run the recv path from the network softirq
>> because when we have to have multiple sessions sharing the same CPUs some
>> sessions can eat up the napi softirq budget and affect other sessions or
>> users. This patch allows us to queue the recv handling to the iscsi
>> workqueue so we can have the scheduler/wq code try to balance the work and
>> CPU use across all sessions's  worker threads.
>>
>> Signed-off-by: Mike Christie <michael.christie@oracle.com>
>> ---
>>   drivers/scsi/iscsi_tcp.c | 62 +++++++++++++++++++++++++++++++---------
>>   drivers/scsi/iscsi_tcp.h |  2 ++
>>   2 files changed, 51 insertions(+), 13 deletions(-)
>>
>> diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
>> index f274a86d2ec0..261599938fc9 100644
>> --- a/drivers/scsi/iscsi_tcp.c
>> +++ b/drivers/scsi/iscsi_tcp.c
>> @@ -52,6 +52,10 @@ static struct iscsi_transport iscsi_sw_tcp_transport;
>>   static unsigned int iscsi_max_lun = ~0;
>>   module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
>>   +static bool iscsi_use_recv_wq;
>> +module_param_named(use_recv_wq, iscsi_use_recv_wq, bool, 0644);
>> +MODULE_PARM_DESC(use_recv_wq, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
> 
> I'm just curious why you chose to make this a module parameter, leaving the current default.

If you only have a couple of sessions, running from the softirq can
sometimes give you better performance. Users might also have
pinned things so that the xmit and recv paths are on different CPUs.
If we switched the default, it could cause a regression for those
users.

The modparam is used because users typically know beforehand how heavily
they will use iscsi. For example, they know whether they will only be
using a couple, 10, or 20 sessions for one or two apps, vs. 50+ sessions
for heavier storage use.
diff mbox series

Patch

diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index f274a86d2ec0..261599938fc9 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -52,6 +52,10 @@  static struct iscsi_transport iscsi_sw_tcp_transport;
 static unsigned int iscsi_max_lun = ~0;
 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
 
+static bool iscsi_use_recv_wq;
+module_param_named(use_recv_wq, iscsi_use_recv_wq, bool, 0644);
+MODULE_PARM_DESC(use_recv_wq, "Set to true to read iSCSI data/headers from the iscsi_q workqueue. The default is false which will perform reads from the network softirq context.");
+
 static int iscsi_sw_tcp_dbg;
 module_param_named(debug_iscsi_tcp, iscsi_sw_tcp_dbg, int,
 		   S_IRUGO | S_IWUSR);
@@ -122,20 +126,13 @@  static inline int iscsi_sw_sk_state_check(struct sock *sk)
 	return 0;
 }
 
-static void iscsi_sw_tcp_data_ready(struct sock *sk)
+static void iscsi_sw_tcp_recv_data(struct iscsi_conn *conn)
 {
-	struct iscsi_conn *conn;
-	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct sock *sk = tcp_sw_conn->sock->sk;
 	read_descriptor_t rd_desc;
 
-	read_lock_bh(&sk->sk_callback_lock);
-	conn = sk->sk_user_data;
-	if (!conn) {
-		read_unlock_bh(&sk->sk_callback_lock);
-		return;
-	}
-	tcp_conn = conn->dd_data;
-
 	/*
 	 * Use rd_desc to pass 'conn' to iscsi_tcp_recv.
 	 * We set count to 1 because we want the network layer to
@@ -144,13 +141,48 @@  static void iscsi_sw_tcp_data_ready(struct sock *sk)
 	 */
 	rd_desc.arg.data = conn;
 	rd_desc.count = 1;
-	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
 
-	iscsi_sw_sk_state_check(sk);
+	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
 
 	/* If we had to (atomically) map a highmem page,
 	 * unmap it now. */
 	iscsi_tcp_segment_unmap(&tcp_conn->in.segment);
+
+	iscsi_sw_sk_state_check(sk);
+}
+
+static void iscsi_sw_tcp_recv_data_work(struct work_struct *work)
+{
+	struct iscsi_conn *conn = container_of(work, struct iscsi_conn,
+					       recvwork);
+	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
+	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
+	struct sock *sk = tcp_sw_conn->sock->sk;
+
+	lock_sock(sk);
+	iscsi_sw_tcp_recv_data(conn);
+	release_sock(sk);
+}
+
+static void iscsi_sw_tcp_data_ready(struct sock *sk)
+{
+	struct iscsi_sw_tcp_conn *tcp_sw_conn;
+	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_conn *conn;
+
+	read_lock_bh(&sk->sk_callback_lock);
+	conn = sk->sk_user_data;
+	if (!conn) {
+		read_unlock_bh(&sk->sk_callback_lock);
+		return;
+	}
+	tcp_conn = conn->dd_data;
+	tcp_sw_conn = tcp_conn->dd_data;
+
+	if (tcp_sw_conn->queue_recv)
+		iscsi_conn_queue_recv(conn);
+	else
+		iscsi_sw_tcp_recv_data(conn);
 	read_unlock_bh(&sk->sk_callback_lock);
 }
 
@@ -557,6 +589,8 @@  iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
 	conn = cls_conn->dd_data;
 	tcp_conn = conn->dd_data;
 	tcp_sw_conn = tcp_conn->dd_data;
+	INIT_WORK(&conn->recvwork, iscsi_sw_tcp_recv_data_work);
+	tcp_sw_conn->queue_recv = iscsi_use_recv_wq;
 
 	tfm = crypto_alloc_ahash("crc32c", 0, CRYPTO_ALG_ASYNC);
 	if (IS_ERR(tfm))
@@ -606,6 +640,8 @@  static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
 	 */
 	kernel_sock_shutdown(sock, SHUT_RDWR);
 
+	iscsi_suspend_rx(conn);
+
 	sock_hold(sock->sk);
 	iscsi_sw_tcp_conn_restore_callbacks(conn);
 	sock_put(sock->sk);
diff --git a/drivers/scsi/iscsi_tcp.h b/drivers/scsi/iscsi_tcp.h
index 791453195099..850a018aefb9 100644
--- a/drivers/scsi/iscsi_tcp.h
+++ b/drivers/scsi/iscsi_tcp.h
@@ -28,6 +28,8 @@  struct iscsi_sw_tcp_send {
 
 struct iscsi_sw_tcp_conn {
 	struct socket		*sock;
+	struct work_struct	recvwork;
+	bool			queue_recv;
 
 	struct iscsi_sw_tcp_send out;
 	/* old values for socket callbacks */