linux-generic: sched fix polling input queue

Message ID 1427895059-13898-1-git-send-email-maxim.uvarov@linaro.org
State New

Commit Message

Maxim Uvarov April 1, 2015, 1:30 p.m. UTC
Commit:
 8fa64fb9fc4e43fbd9d3c226ed89229863bdb771
 linux-generic: scheduler: restructured queue and pktio integration
Fix a race between scheduler termination and polling of the input queue.
This patch locks the pktio entry while polling, to prevent the linked
queue from being destroyed in the middle of the poll.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
 .../linux-generic/include/odp_packet_io_internal.h |  2 ++
 platform/linux-generic/odp_packet_io.c             | 39 +++++++++++++++++-----
 2 files changed, 33 insertions(+), 8 deletions(-)

Comments

Stuart Haslam April 1, 2015, 1:57 p.m. UTC | #1
On Wed, Apr 01, 2015 at 04:30:59PM +0300, Maxim Uvarov wrote:
> Commit:
>  8fa64fb9fc4e43fbd9d3c226ed89229863bdb771
>  linux-generic: scheduler: restructured queue and pktio integration
> Implement race with schedule termination and polling input queue.
> This patch locks pktio while doing poll to prevent destroy linked
> queue in the middle of this poll.
> 

Admittedly I've not looked in detail at the terminate sequence after the
scheduler changes, so don't really understand what you're fixing, but
this feels like a workaround rather than a fix. Shouldn't the pktin
queue have been removed from the scheduler before it's closed? What's
the sequence that leads to the problem?

Another reason I'm not keen on this change is that those locks really
need to go as they serialise all send/recv calls (per pktio) and kill
scalability. As far as I can see they're only there to protect against
the application closing a pktio handle while another thread is using it,
but IMO we should consider that an application error. The only
linux-generic implementation that needs them is MMAP, and that can be
fixed. So it would be good to find a solution for this problem that
doesn't depend on those locks.

--
Stuart.
Maxim Uvarov April 1, 2015, 2:47 p.m. UTC | #2
On 04/01/15 16:57, Stuart Haslam wrote:
> On Wed, Apr 01, 2015 at 04:30:59PM +0300, Maxim Uvarov wrote:
>> Commit:
>>   8fa64fb9fc4e43fbd9d3c226ed89229863bdb771
>>   linux-generic: scheduler: restructured queue and pktio integration
>> Implement race with schedule termination and polling input queue.
>> This patch locks pktio while doing poll to prevent destroy linked
>> queue in the middle of this poll.
>>
> Admittedly I've not looked in detail at the terminate sequence after the
> scheduler changes, so don't really understand what you're fixing, but
> this feels like a workaround rather than a fix. Shouldn't the pktin
> queue have been removed from the scheduler before it's closed? What's
> the sequence that leads to the problem?
As I understand it, one thread goes into
schedule() -> pktin_poll(sched_cmd->pe)
and successfully does:
odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);

After that, another thread calls terminate, and further down in the
pktin_poll code:
queue_enq_multi(qentry, hdr_tbl, num_enq);

qentry is corrupted here because it has been destroyed by the other thread.

Because qentry is linked to the pktio entry, we have to lock the pktio
entry for that queue, to make sure it is not modified while pktin_poll
is executing.

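To make the ordering concrete, here is a minimal toy model of the
protection scheme the patch applies. It uses plain pthreads and made-up
toy_* names in place of the real ODP internals (lock_entry(),
entry->s.inq_default, ODP_QUEUE_INVALID), so it only illustrates the
pattern, it is not the actual linux-generic code:

#include <pthread.h>
#include <stdlib.h>

typedef struct {
	pthread_mutex_t lock;  /* stands in for lock_entry()/unlock_entry() */
	void *inq_default;     /* stands in for entry->s.inq_default        */
} toy_pktio_t;

/* "Thread A": the scheduler's input queue poll. */
int toy_pktin_poll(toy_pktio_t *p)
{
	pthread_mutex_lock(&p->lock);

	/* Equivalent of checking inq_default against ODP_QUEUE_INVALID. */
	if (p->inq_default == NULL) {
		pthread_mutex_unlock(&p->lock);
		return -1;
	}

	/* ... receive packets and enqueue them to p->inq_default ...
	 * The queue cannot be destroyed while we hold the lock. */

	pthread_mutex_unlock(&p->lock);
	return 0;
}

/* "Thread B": teardown, the counterpart of removing/destroying the queue. */
void toy_destroy_inq(toy_pktio_t *p)
{
	pthread_mutex_lock(&p->lock);
	free(p->inq_default);
	p->inq_default = NULL;
	pthread_mutex_unlock(&p->lock);
}

Without the lock, "thread A" can dereference inq_default after "thread B"
has already freed it, which is exactly the corruption in the backtrace below.
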
I ran make check from the top-level directory to find this problem (it
occurs in roughly 1 out of 10 runs).

I sent the backtrace to the mailing list some time ago:

Core was generated by `./test/validation/odp_pktio'.
Program terminated with signal SIGSEGV, Segmentation fault.
#0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
at ./include/odp/atomic.h:70
70        return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
(gdb) bt
#0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
at ./include/odp/atomic.h:70
#1  0x0000000000411e8a in odp_ticketlock_lock
(ticketlock=0x2baaaadfff00) at odp_ticketlock.c:28
#2  0x000000000040f0f8 in queue_enq_multi (queue=0x2baaaadfff00,
buf_hdr=0x7fff1fccb0b0, num=1) at odp_queue.c:376
#3  0x000000000040987d in pktin_poll (entry=0x2aaaab200600) at
odp_packet_io.c:713
#4  0x0000000000410378 in schedule (out_queue=0x0,
out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:455
#5  0x000000000041050a in schedule_loop (out_queue=0x0, wait=1,
out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:518
#6  0x00000000004105a4 in odp_schedule (out_queue=0x0, wait=1) at
odp_schedule.c:551
#7  0x0000000000402b83 in destroy_inq (pktio=0x2) at odp_pktio.c:320
#8  0x00000000004032fa in pktio_test_txrx (q_type=1, num_pkts=4) at
odp_pktio.c:474
#9  0x000000000040337d in test_odp_pktio_poll_multi () at odp_pktio.c:487
#10 0x00007f3e96575482 in run_single_test () from
/usr/local/lib/libcunit.so.1
#11 0x00007f3e965750b2 in run_single_suite () from
/usr/local/lib/libcunit.so.1
#12 0x00007f3e96572d55 in CU_run_all_tests () from
/usr/local/lib/libcunit.so.1
#13 0x00007f3e96577245 in basic_run_all_tests () from
/usr/local/lib/libcunit.so.1
#14 0x00007f3e96576fe7 in CU_basic_run_tests () from
/usr/local/lib/libcunit.so.1
#15 0x0000000000403fe1 in main () at common/odp_cunit_common.c:77
(gdb) p atom
$1 = (odp_atomic_u32_t *) 0x2baaaadfff00
(gdb) p atom->v
Cannot access memory at address 0x2baaaadfff00
(gdb) up
#1  0x0000000000411e8a in odp_ticketlock_lock
(ticketlock=0x2baaaadfff00) at odp_ticketlock.c:28
28        ticket = odp_atomic_fetch_inc_u32(&ticketlock->next_ticket);
(gdb) up
#2  0x000000000040f0f8 in queue_enq_multi (queue=0x2baaaadfff00,
buf_hdr=0x7fff1fccb0b0, num=1) at odp_queue.c:376
376        LOCK(&queue->s.lock);
(gdb)


>
> Another reason I'm not keen on this change is that those locks really
> need to go as they serialise all send/recv calls (per pktio) and kill
> scalability. As far as I can see they're only there to protect against
> the application closing a pktio handle while another thread is using it,
> but IMO we should consider that an application error. The only
> linux-generic implementation that needs them is MMAP, and that can be
> fixed. So it would be good to find a solution for this problem that
> doesn't depend on those locks.
Yes, that is true. There should probably be a scheduler lock somewhere,
or it should not be possible for one thread to terminate resources that
another thread is still using. We know that odp_schedule() is 10-20 times
slower than direct pktio, so we probably need to reconsider the locking
scheme in the scheduler.

Maxim.
>
> --
> Stuart.
Ciprian Barbu April 1, 2015, 3:23 p.m. UTC | #3
There is another problem, although I don't fully understand the
consequences. The odp_pktio validation test suite calls
odp_pktio_inq_remdef and then runs another round of odp_schedule to clear
the internal caches. What that does is set the internal queue handle
associated with the pktio to ODP_QUEUE_INVALID. If this is a valid use
(which I doubt), then queue_enq_multi should check that
qentry->s.default_inq is still valid.

It's not clear how that causes the crash; the trace Maxim added shows
that the atomic variable itself is invalid (and I've seen some relaxed
atomic checking, maybe that has something to do with it). Also, I don't
see any thread created by odp_pktio, so it looks like everything is
running on a single core.
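
For reference, this is roughly the sequence that puts the pktio into that
state, reconstructed from the description above and from frames #6/#7 of
Maxim's backtrace. It is only a sketch under those assumptions (ODP API
names of that era, a hypothetical destroy_inq_sketch helper), not the
actual odp_pktio.c test code:

#include <odp.h>

/* Reconstructed sketch of the test's destroy_inq() path (not verbatim). */
static void destroy_inq_sketch(odp_pktio_t pktio, odp_queue_t inq)
{
	odp_event_t ev;

	/* Detach the default input queue. After this the pktio's
	 * inq_default is ODP_QUEUE_INVALID, but the scheduler can
	 * still poll the pktio via pktin_poll(). */
	odp_pktio_inq_remdef(pktio);

	/* Drain whatever the scheduler still has cached. A pktin_poll()
	 * run from inside odp_schedule() here is where the invalid or
	 * already-destroyed queue gets dereferenced. */
	while ((ev = odp_schedule(NULL, ODP_SCHED_NO_WAIT)) !=
	       ODP_EVENT_INVALID)
		odp_buffer_free(odp_buffer_from_event(ev));

	odp_queue_destroy(inq);
}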

On Wed, Apr 1, 2015 at 5:47 PM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote:
> On 04/01/15 16:57, Stuart Haslam wrote:
>>
>> On Wed, Apr 01, 2015 at 04:30:59PM +0300, Maxim Uvarov wrote:
>>>
>>> Commit:
>>>   8fa64fb9fc4e43fbd9d3c226ed89229863bdb771
>>>   linux-generic: scheduler: restructured queue and pktio integration
>>> Implement race with schedule termination and polling input queue.
>>> This patch locks pktio while doing poll to prevent destroy linked
>>> queue in the middle of this poll.
>>>
>> Admittedly I've not looked in detail at the terminate sequence after the
>> scheduler changes, so don't really understand what you're fixing, but
>> this feels like a workaround rather than a fix. Shouldn't the pktin
>> queue have been removed from the scheduler before it's closed? What's
>> the sequence that leads to the problem?
>
> as I understand that right one thread goes to
> schedule()->pktin_poll(sched_cmd->pe))
> then successful do:
> odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);
>
> after that other thread calls terminate and bellow in the pktio_poll code:
> queue_enq_multi(qentry, hdr_tbl, num_enq);
>
> qentry here is corrupted due to it has been destroyed by other thread.
>
> Because of qentry is linked to pktio entry we have to lock pktio entry for
> that
> queue to make sure that is was not modified while pktin_poll execution.
>
> I did make check from the root to find that problem (it occurs about 1 of 10
> run times).
>
> I sent back trace some to mailing list some time ago:
>
> Core was generated by `./test/validation/odp_pktio'.
> Program terminated with signal SIGSEGV, Segmentation fault.
> #0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
> at ./include/odp/atomic.h:70
> 70        return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
> (gdb) bt
> #0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
> at ./include/odp/atomic.h:70
> #1  0x0000000000411e8a in odp_ticketlock_lock
> (ticketlock=0x2baaaadfff00) at odp_ticketlock.c:28
> #2  0x000000000040f0f8 in queue_enq_multi (queue=0x2baaaadfff00,
> buf_hdr=0x7fff1fccb0b0, num=1) at odp_queue.c:376
> #3  0x000000000040987d in pktin_poll (entry=0x2aaaab200600) at
> odp_packet_io.c:713
> #4  0x0000000000410378 in schedule (out_queue=0x0,
> out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:455
> #5  0x000000000041050a in schedule_loop (out_queue=0x0, wait=1,
> out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:518
> #6  0x00000000004105a4 in odp_schedule (out_queue=0x0, wait=1) at
> odp_schedule.c:551
> #7  0x0000000000402b83 in destroy_inq (pktio=0x2) at odp_pktio.c:320
> #8  0x00000000004032fa in pktio_test_txrx (q_type=1, num_pkts=4) at
> odp_pktio.c:474
> #9  0x000000000040337d in test_odp_pktio_poll_multi () at odp_pktio.c:487
> #10 0x00007f3e96575482 in run_single_test () from
> /usr/local/lib/libcunit.so.1
> #11 0x00007f3e965750b2 in run_single_suite () from
> /usr/local/lib/libcunit.so.1
> #12 0x00007f3e96572d55 in CU_run_all_tests () from
> /usr/local/lib/libcunit.so.1
> #13 0x00007f3e96577245 in basic_run_all_tests () from
> /usr/local/lib/libcunit.so.1
> #14 0x00007f3e96576fe7 in CU_basic_run_tests () from
> /usr/local/lib/libcunit.so.1
> #15 0x0000000000403fe1 in main () at common/odp_cunit_common.c:77
> (gdb) p atom
> $1 = (odp_atomic_u32_t *) 0x2baaaadfff00
> (gdb) p atom->v
> Cannot access memory at address 0x2baaaadfff00
> (gdb) up
> #1  0x0000000000411e8a in odp_ticketlock_lock
> (ticketlock=0x2baaaadfff00) at odp_ticketlock.c:28
> 28        ticket = odp_atomic_fetch_inc_u32(&ticketlock->next_ticket);
> (gdb) up
> #2  0x000000000040f0f8 in queue_enq_multi (queue=0x2baaaadfff00,
> buf_hdr=0x7fff1fccb0b0, num=1) at odp_queue.c:376
> 376        LOCK(&queue->s.lock);
> (gdb)
>
>
>>
>> Another reason I'm not keen on this change is that those locks really
>> need to go as they serialise all send/recv calls (per pktio) and kill
>> scalability. As far as I can see they're only there to protect against
>> the application closing a pktio handle while another thread is using it,
>> but IMO we should consider that an application error. The only
>> linux-generic implementation that needs them is MMAP, and that can be
>> fixed. So it would be good to find a solution for this problem that
>> doesn't depend on those locks.
>
> Yes, that is true. There might be sched lock should be somewhere or it
> should not be possible
> to terminate from other thread resources related to other thread. We know
> that odp_schedule()
> is 10-20 times slower than direct pktio, so probably we need to reconsider
> locking scheme in
> the scheduler.
>
> Maxim.
>>
>>
>> --
>> Stuart.
>
>
Stuart Haslam April 1, 2015, 4:01 p.m. UTC | #4
On Wed, Apr 01, 2015 at 06:23:53PM +0300, Ciprian Barbu wrote:
 
> It's not clear how that causes the crash, as the trace Maxim added
> shows it seems that the atomic variable is invalid (and I've seen some
> relaxed atomic checking, maybe that has something to do with it).

This is just the first point at which it blows up when the qentry
pointer is bogus, as the first thing queue_enq_multi() tries to do is
take a lock.
Maxim Uvarov April 1, 2015, 4:23 p.m. UTC | #5
On 04/01/15 18:29, Stuart Haslam wrote:
> On Wed, Apr 01, 2015 at 05:47:51PM +0300, Maxim Uvarov wrote:
>> On 04/01/15 16:57, Stuart Haslam wrote:
>>> On Wed, Apr 01, 2015 at 04:30:59PM +0300, Maxim Uvarov wrote:
>>>> Commit:
>>>>   8fa64fb9fc4e43fbd9d3c226ed89229863bdb771
>>>>   linux-generic: scheduler: restructured queue and pktio integration
>>>> Implement race with schedule termination and polling input queue.
>>>> This patch locks pktio while doing poll to prevent destroy linked
>>>> queue in the middle of this poll.
>>>>
>>> Admittedly I've not looked in detail at the terminate sequence after the
>>> scheduler changes, so don't really understand what you're fixing, but
>>> this feels like a workaround rather than a fix. Shouldn't the pktin
>>> queue have been removed from the scheduler before it's closed? What's
>>> the sequence that leads to the problem?
>> as I understand that right one thread goes to
>> schedule()->pktin_poll(sched_cmd->pe))
>> then successful do:
>> odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);
>>
>> after that other thread calls terminate and bellow in the pktio_poll code:
>> queue_enq_multi(qentry, hdr_tbl, num_enq);
>>
>> qentry here is corrupted due to it has been destroyed by other thread.
>>
> I see from the trace that you can reproduce this with the odp_pktio.c
> validation test, which is single threaded so there's no thread race.
>
>> Because of qentry is linked to pktio entry we have to lock pktio
>> entry for that
>> queue to make sure that is was not modified while pktin_poll execution.
>>
>> I did make check from the root to find that problem (it occurs about
>> 1 of 10 run times).
>>
>> I sent back trace some to mailing list some time ago:
>>
>> Core was generated by `./test/validation/odp_pktio'.
>> Program terminated with signal SIGSEGV, Segmentation fault.
>> #0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
>> at ./include/odp/atomic.h:70
>> 70        return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
>> (gdb) bt
>> #0  0x0000000000411dc4 in odp_atomic_fetch_inc_u32 (atom=0x2baaaadfff00)
>> at ./include/odp/atomic.h:70
>> #1  0x0000000000411e8a in odp_ticketlock_lock
>> (ticketlock=0x2baaaadfff00) at odp_ticketlock.c:28
>> #2  0x000000000040f0f8 in queue_enq_multi (queue=0x2baaaadfff00,
>> buf_hdr=0x7fff1fccb0b0, num=1) at odp_queue.c:376
>> #3  0x000000000040987d in pktin_poll (entry=0x2aaaab200600) at
>> odp_packet_io.c:713
>> #4  0x0000000000410378 in schedule (out_queue=0x0,
>> out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:455
>> #5  0x000000000041050a in schedule_loop (out_queue=0x0, wait=1,
>> out_ev=0x7fff1fccb1d8, max_num=1, max_deq=4) at odp_schedule.c:518
>> #6  0x00000000004105a4 in odp_schedule (out_queue=0x0, wait=1) at
>> odp_schedule.c:551
>> #7  0x0000000000402b83 in destroy_inq (pktio=0x2) at odp_pktio.c:320
> So the problem sequence is this;
>
>   1. destroy_inq() calls odp_pktio_inq_remdef()
>   2. odp_pktio_inq_remdef() sets the pktio's inq_default to
>      ODP_QUEUE_INVALID and does nothing to remove the queue from
>      scheduler (not sure if it should?)
>   3. destroy_inq() calls odp_schedule()
>   4. odp_schedule() dequeues the event to poll the pktin queue and then
>      calls pktin_poll()
>   5. pktin_poll() attempts to fetch some packets from the pktin, and if
>      it receives any, attempts to enqueue them using inq_default,
>      which by this point is ODP_QUEUE_INVALID.
>
> So there's a fundamental breakage. I'm not sure yet how it should be
> fixed but this patch will make it go away;
Yes, confirmed. Thanks for describing that. The patch fixes the current
problem, so I think it should go in before the next tag.
Please send it to the mailing list.

Maxim.

> diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
> index 15335d1..ae1de3c 100644
> --- a/platform/linux-generic/odp_packet_io.c
> +++ b/platform/linux-generic/odp_packet_io.c
> @@ -685,6 +685,9 @@ int pktin_poll(pktio_entry_t *entry)
>          if (odp_unlikely(is_free(entry)))
>                  return -1;
>   
> +       if (entry->s.inq_default == ODP_QUEUE_INVALID)
> +               return -1;
> +
>          num = odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);
>   
>          if (num < 0) {
>
>
> The race is that you'll only get the crash if some packets were actually
>   received between the last time the validation test itself called
> odp_schedule() and calling destroy_inq(). You'd never see it on the
> linux-generic "loop" interface as traffic doesn't just mysteriously
> appear there.
>

Patch

diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h
index 18b59ef..804ceda 100644
--- a/platform/linux-generic/include/odp_packet_io_internal.h
+++ b/platform/linux-generic/include/odp_packet_io_internal.h
@@ -88,6 +88,8 @@  static inline pktio_entry_t *get_pktio_entry(odp_pktio_t pktio)
 
 int pktin_poll(pktio_entry_t *entry);
 
+int __odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c
index e7d3c1b..391a8b3 100644
--- a/platform/linux-generic/odp_packet_io.c
+++ b/platform/linux-generic/odp_packet_io.c
@@ -382,7 +382,7 @@  static int deq_loopback(pktio_entry_t *pktio_entry, odp_packet_t pkts[],
 	return nbr;
 }
 
-int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
+int __odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
 {
 	pktio_entry_t *pktio_entry = get_pktio_entry(id);
 	int pkts;
@@ -391,7 +391,6 @@  int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
 	if (pktio_entry == NULL)
 		return -1;
 
-	lock_entry(pktio_entry);
 	switch (pktio_entry->s.type) {
 	case ODP_PKTIO_TYPE_SOCKET_BASIC:
 		pkts = recv_pkt_sock_basic(&pktio_entry->s.pkt_sock,
@@ -413,7 +412,6 @@  int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
 		break;
 	}
 
-	unlock_entry(pktio_entry);
 	if (pkts < 0)
 		return pkts;
 
@@ -423,6 +421,20 @@  int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
 	return pkts;
 }
 
+int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
+{
+	pktio_entry_t *pktio_entry = get_pktio_entry(id);
+	int ret = -1;
+
+	if (pktio_entry == NULL)
+		return -1;
+
+	lock_entry(pktio_entry);
+	ret = __odp_pktio_recv(id, pkt_table, len);
+	unlock_entry(pktio_entry);
+
+	return ret;
+}
 static int enq_loopback(pktio_entry_t *pktio_entry, odp_packet_t pkt_tbl[],
 			unsigned len)
 {
@@ -681,13 +693,26 @@  int pktin_poll(pktio_entry_t *entry)
 	odp_packet_t pkt_tbl[QUEUE_MULTI_MAX];
 	odp_buffer_hdr_t *hdr_tbl[QUEUE_MULTI_MAX];
 	int num, num_enq, i;
+	queue_entry_t *qentry;
+	pktio_entry_t *pktio_entry;
 
 	if (odp_unlikely(is_free(entry)))
 		return -1;
 
-	num = odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);
+	pktio_entry = get_pktio_entry(entry->s.handle);
+
+	lock_entry(pktio_entry);
+	if (entry->s.inq_default == ODP_QUEUE_INVALID) {
+		unlock_entry(pktio_entry);
+		return -1;
+	}
+
+	qentry = queue_to_qentry(entry->s.inq_default);
+
+	num = __odp_pktio_recv(entry->s.handle, pkt_tbl, QUEUE_MULTI_MAX);
 
 	if (num < 0) {
+		unlock_entry(pktio_entry);
 		ODP_ERR("Packet recv error\n");
 		return -1;
 	}
@@ -707,12 +732,10 @@  int pktin_poll(pktio_entry_t *entry)
 		}
 	}
 
-	if (num_enq) {
-		queue_entry_t *qentry;
-		qentry = queue_to_qentry(entry->s.inq_default);
+	if (num_enq)
 		queue_enq_multi(qentry, hdr_tbl, num_enq);
-	}
 
+	unlock_entry(pktio_entry);
 	return 0;
 }