
[4/5] net: thunderbolt: Enable full end-to-end flow control

Message ID 20220830153250.15496-5-mika.westerberg@linux.intel.com
State New
Series thunderbolt: net: Enable full end-to-end flow control

Commit Message

Mika Westerberg Aug. 30, 2022, 3:32 p.m. UTC
The USB4NET protocol allows the networking driver to take advantage of
the end-to-end flow control supported by the USB4 host interface. This
should prevent the receiving side from dropping network packets.

In addition, add a module parameter that can be used to turn this off
in case it causes problems.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
---
 drivers/net/thunderbolt.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)
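
A note on usage (not part of the commit message): the parameter is
registered with 0444 permissions, so it is read-only at runtime and
takes effect at module load time, e.g. thunderbolt_net.e2e=0 on the
kernel command line (assuming the driver is built as the thunderbolt_net
module). The negotiation the patch implements boils down to the
following, shown here as a small standalone C sketch with a hypothetical
helper name:

#define TBNET_E2E	BIT(0)	/* bit 0 of the prtcstns property */

static bool tbnet_use_e2e(bool local_e2e, u32 remote_prtcstns)
{
	/*
	 * Full E2E is used only when the e2e module parameter is set
	 * locally and the remote end also advertises bit 0 in its
	 * prtcstns property.
	 */
	return local_e2e && (remote_prtcstns & TBNET_E2E);
}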

Patch

diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ab3f04562980..8e272d2a61e5 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -30,6 +30,7 @@ 
 #define TBNET_RING_SIZE		256
 #define TBNET_LOGIN_RETRIES	60
 #define TBNET_LOGOUT_RETRIES	10
+#define TBNET_E2E		BIT(0)
 #define TBNET_MATCH_FRAGS_ID	BIT(1)
 #define TBNET_64K_FRAMES	BIT(2)
 #define TBNET_MAX_MTU		SZ_64K
@@ -209,6 +210,10 @@  static const uuid_t tbnet_svc_uuid =
 
 static struct tb_property_dir *tbnet_dir;
 
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
 static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
 	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
 	enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -873,6 +878,7 @@  static int tbnet_open(struct net_device *dev)
 	struct tb_xdomain *xd = net->xd;
 	u16 sof_mask, eof_mask;
 	struct tb_ring *ring;
+	unsigned int flags;
 	int hopid;
 
 	netif_carrier_off(dev);
@@ -897,9 +903,14 @@  static int tbnet_open(struct net_device *dev)
 	sof_mask = BIT(TBIP_PDF_FRAME_START);
 	eof_mask = BIT(TBIP_PDF_FRAME_END);
 
-	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
-				RING_FLAG_FRAME, 0, sof_mask, eof_mask,
-				tbnet_start_poll, net);
+	flags = RING_FLAG_FRAME;
+	/* Only enable full E2E if the other end supports it too */
+	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+		flags |= RING_FLAG_E2E;
+
+	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+				net->tx_ring.ring->hop, sof_mask,
+				eof_mask, tbnet_start_poll, net);
 	if (!ring) {
 		netdev_err(dev, "failed to allocate Rx ring\n");
 		tb_ring_free(net->tx_ring.ring);
@@ -1362,6 +1373,7 @@  static struct tb_service_driver tbnet_driver = {
 
 static int __init tbnet_init(void)
 {
+	unsigned int flags;
 	int ret;
 
 	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1371,12 +1383,11 @@  static int __init tbnet_init(void)
 	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
 	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
 	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
-	/* Currently only announce support for match frags ID (bit 1). Bit 0
-	 * is reserved for full E2E flow control which we do not support at
-	 * the moment.
-	 */
-	tb_property_add_immediate(tbnet_dir, "prtcstns",
-				  TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+	flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+	if (tbnet_e2e)
+		flags |= TBNET_E2E;
+	tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
 
 	ret = tb_register_property_dir("network", tbnet_dir);
 	if (ret) {
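
For context on the Rx hunk above, a condensed sketch of tbnet_open() as
it looks with this patch applied (error handling elided; assuming the
tb_ring_alloc_tx()/tb_ring_alloc_rx() signatures used in the diff): the
Tx ring is allocated first so that its HopID can be passed to the Rx
ring allocation, since that is the Tx HopID over which E2E flow-control
credits for the Rx ring are sent when RING_FLAG_E2E is set.

	/* Allocate the Tx ring first so its HopID is known */
	net->tx_ring.ring = tb_ring_alloc_tx(xd->tb->nhi, -1,
					     TBNET_RING_SIZE,
					     RING_FLAG_FRAME);

	flags = RING_FLAG_FRAME;
	/* Only enable full E2E if the other end supports it too */
	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
		flags |= RING_FLAG_E2E;

	/* The fifth argument is the Tx HopID used for E2E credits */
	net->rx_ring.ring = tb_ring_alloc_rx(xd->tb->nhi, -1,
					     TBNET_RING_SIZE, flags,
					     net->tx_ring.ring->hop,
					     sof_mask, eof_mask,
					     tbnet_start_poll, net);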