Message ID: 20201119083024.119566-8-bjorn.topel@gmail.com
State: Superseded
Series: [bpf-next,v3,01/10] net: introduce preferred busy-polling
On Thu, Nov 19, 2020 at 9:34 AM Björn Töpel <bjorn.topel@gmail.com> wrote:
>
> From: Björn Töpel <bjorn.topel@intel.com>
>
> Start using recvfrom() in the rxdrop scenario.
>
> Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
> ---
>  samples/bpf/xdpsock_user.c | 8 ++++----
>  1 file changed, 4 insertions(+), 4 deletions(-)

Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>

> diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
> index 2567f0db5aca..f90111b95b2e 100644
> --- a/samples/bpf/xdpsock_user.c
> +++ b/samples/bpf/xdpsock_user.c
> @@ -1170,7 +1170,7 @@ static inline void complete_tx_only(struct xsk_socket_info *xsk,
>  	}
>  }
>
> -static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
> +static void rx_drop(struct xsk_socket_info *xsk)
>  {
>  	unsigned int rcvd, i;
>  	u32 idx_rx = 0, idx_fq = 0;
> @@ -1180,7 +1180,7 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
>  	if (!rcvd) {
>  		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
>  			xsk->app_stats.rx_empty_polls++;
> -			ret = poll(fds, num_socks, opt_timeout);
> +			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
>  		}
>  		return;
>  	}
> @@ -1191,7 +1191,7 @@ static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds)
>  			exit_with_error(-ret);
>  		if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
>  			xsk->app_stats.fill_fail_polls++;
> -			ret = poll(fds, num_socks, opt_timeout);
> +			recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
>  		}
>  		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
>  	}
> @@ -1233,7 +1233,7 @@ static void rx_drop_all(void)
>  		}
>
>  		for (i = 0; i < num_socks; i++)
> -			rx_drop(xsks[i], fds);
> +			rx_drop(xsks[i]);
>
>  		if (benchmark_done)
>  			break;
> --
> 2.27.0
>
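[Editor's note] For readers outside the thread: the change above swaps poll() for a zero-length, non-blocking recvfrom() as the wakeup syscall. When the socket is bound with XDP_USE_NEED_WAKEUP, the kernel flags the fill ring whenever it needs a syscall from userspace to resume driver Rx processing, and recvfrom(MSG_DONTWAIT) delivers that kick on the receive path, where this series' preferred busy polling is driven, whereas poll() with a timeout may sleep instead. Below is a minimal sketch of the pattern, assuming a libbpf AF_XDP socket set up elsewhere; the helper name kick_rx and its caller-supplied arguments are hypothetical, while the libbpf calls are the ones used in the patch.

#include <sys/socket.h>
#include <bpf/xsk.h>

/* Hypothetical helper: kick the kernel if the fill ring asks for it.
 * xsk and fq are the caller's libbpf socket and its umem's fill ring.
 */
static void kick_rx(struct xsk_socket *xsk, struct xsk_ring_prod *fq)
{
	if (!xsk_ring_prod__needs_wakeup(fq))
		return;
	/* Zero-length, non-blocking read: issued only for its wakeup
	 * side effect, so the return value is deliberately ignored.
	 */
	recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
}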