Message ID | 1454490337-25997-1-git-send-email-maxim.uvarov@linaro.org |
---|---|
State | Superseded |
Headers | show |
I'm not sure this is working properly. Running examples/timer/odp_timer_test without this patch I see this sort of output: bill@Ubuntu15:~/linaro/odp/example/timer$ ./odp_timer_test ODP timer example starts odp_pool.c:103:odp_pool_init_global(): Pool init global odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 odp_pool.c:107:odp_pool_init_global(): odp_queue.c:146:odp_queue_init_global():Queue init ... odp_queue.c:170:odp_queue_init_global():done odp_queue.c:171:odp_queue_init_global():Queue init global odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 odp_queue.c:176:odp_queue_init_global(): odp_schedule.c:123:odp_schedule_init_global():Schedule init ... odp_schedule.c:188:odp_schedule_init_global():done ODP system info --------------- ODP API version: 1.6.0 CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz CPU freq (hz): 3990762000 Cache line size: 64 Max CPU count: 4 odp_cpumask_task.c:44:odp_cpumask_default_worker(): CPU0 will be used for both control and worker threads, this will likely have a performance impact on the worker thread. 
num worker threads: 4 first CPU: 0 cpu mask: 0xF resolution: 10000 usec min timeout: 0 usec max timeout: 10000000 usec period: 1000000 usec timeouts: 30 odp_timer.c:674:itimer_init():Creating POSIX timer for timer pool timer_pool, period 10000000 ns Shared memory -------------- page size: 4 kB huge page size: 2048 kB id name kB align huge addr 0 odp_thread_globals 33 64 * 0x7f2f66400000 1 odp_buffer_pools 133 8512 * 0x7f2f66201e00 2 odp_queues 320 320 * 0x7f2f66000040 3 odp_scheduler 10 64 * 0x7f2f65e00000 4 odp_sched_pool 204 4096 * 0x7f2f65c00000 5 odp_pktio_entries 96 1536 * 0x7f2f65a00000 6 crypto_pool 15 64 * 0x7f2f65800000 7 shm_odp_cos_tbl 8 128 * 0x7f2f65600000 8 shm_odp_pmr_tbl 4 64 * 0x7f2f65400000 9 shm_odp_pmr_set_tbl 20 320 * 0x7f2f65200080 10 shm_test_globals 4 64 * 0x7f2f65000000 11 msg_pool 1876 4096 * 0x7f2f64e00000 12 timer_pool 0 64 0x7f2f67a05000 Timer pool ---------- name: timer_pool resolution: 10000000 ns min tmo: 0 ticks max tmo: 10000000000 ticks CPU freq 3990762000 Hz Timer ticks vs nanoseconds: 0 ns -> 0 ticks 0 ticks -> 0 ns 1 ns -> 0 ticks 0 ticks -> 0 ns 10 ns -> 0 ticks 0 ticks -> 0 ns 100 ns -> 0 ticks 0 ticks -> 0 ns 1000 ns -> 0 ticks 0 ticks -> 0 ns 10000 ns -> 0 ticks 0 ticks -> 0 ns 100000 ns -> 0 ticks 0 ticks -> 0 ns 1000000 ns -> 0 ticks 0 ticks -> 0 ns 10000000 ns -> 1 ticks 1 ticks -> 10000000 ns 100000000 ns -> 10 ticks 10 ticks -> 100000000 ns 1000000000 ns -> 100 ticks 100 ticks -> 1000000000 ns 10000000000 ns -> 1000 ticks 1000 ticks -> 10000000000 ns 100000000000 ns -> 10000 ticks 10000 ticks -> 100000000000 ns Thread 1 starts on cpu 0 Thread 2 starts on cpu 1 Thread 3 starts on cpu 3 Thread 4 starts on cpu 2 odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 
ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 ...lines omitted odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 Thread 2 exits Thread 3 exits Thread 4 exits Thread 1 exits ODP timer test complete ------------------------------------------ However with this patch applied I get this: bill@Ubuntu15:~/linaro/maximtimer/example/timer$ ./odp_timer_test ODP timer example starts odp_pool.c:103:odp_pool_init_global(): Pool init global odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 odp_pool.c:107:odp_pool_init_global(): odp_queue.c:146:odp_queue_init_global():Queue init ... 
odp_queue.c:170:odp_queue_init_global():done odp_queue.c:171:odp_queue_init_global():Queue init global odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 odp_queue.c:176:odp_queue_init_global(): odp_schedule.c:123:odp_schedule_init_global():Schedule init ... odp_schedule.c:188:odp_schedule_init_global():done ODP system info --------------- ODP API version: 1.6.0 CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz CPU freq (hz): 3990762000 Cache line size: 64 Max CPU count: 4 odp_cpumask_task.c:44:odp_cpumask_default_worker(): CPU0 will be used for both control and worker threads, this will likely have a performance impact on the worker thread. num worker threads: 4 first CPU: 0 cpu mask: 0xF resolution: 10000 usec min timeout: 0 usec max timeout: 10000000 usec period: 1000000 usec timeouts: 30 odp_timer.c:729:itimer_init():Creating POSIX timer for timer pool timer_pool, period 10000000 ns Shared memory -------------- page size: 4 kB huge page size: 2048 kB id name kB align huge addr 0 odp_thread_globals 33 64 * 0x7f02c2400000 1 odp_buffer_pools 133 8512 * 0x7f02c2201b80 2 odp_queues 320 320 * 0x7f02c20000c0 3 odp_scheduler 10 64 * 0x7f02c1e00000 4 odp_sched_pool 204 4096 * 0x7f02c1c00000 5 odp_pktio_entries 96 1536 * 0x7f02c1a00400 6 crypto_pool 15 64 * 0x7f02c1800000 7 shm_odp_cos_tbl 8 128 * 0x7f02c1600000 8 shm_odp_pmr_tbl 4 64 * 0x7f02c1400000 9 shm_odp_pmr_set_tbl 20 320 * 0x7f02c1200100 10 shm_test_globals 4 64 * 0x7f02c1000000 11 msg_pool 1876 4096 * 0x7f02c0e00000 12 timer_pool 0 64 0x7f02c39d8000 Timer pool ---------- name: timer_pool resolution: 10000000 ns min tmo: 0 ticks max tmo: 10000000000 ticks CPU freq 3990762000 Hz Timer ticks vs nanoseconds: 0 ns -> 0 ticks 0 ticks -> 0 ns 1 ns -> 0 ticks 0 ticks -> 0 ns 10 ns -> 0 ticks 0 ticks -> 0 ns 100 ns -> 0 ticks 0 ticks -> 0 ns 1000 ns -> 0 ticks 0 ticks -> 0 ns 10000 ns -> 0 ticks 0 ticks -> 0 ns 100000 ns -> 0 ticks 0 
ticks -> 0 ns 1000000 ns -> 0 ticks 0 ticks -> 0 ns 10000000 ns -> 1 ticks 1 ticks -> 10000000 ns 100000000 ns -> 10 ticks 10 ticks -> 100000000 ns 1000000000 ns -> 100 ticks 100 ticks -> 1000000000 ns 10000000000 ns -> 1000 ticks 1000 ticks -> 10000000000 ns 100000000000 ns -> 10000 ticks 10000 ticks -> 100000000000 ns Thread 1 starts on cpu 0 Thread 2 starts on cpu 1 Thread 3 starts on cpu 3 Thread 4 starts on cpu 2 odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, 1000000000 ns odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 Alarm clock bill@Ubuntu15:~/linaro/maximtimer/example/timer$ On Wed, Feb 3, 2016 at 3:05 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > Switch timer to use SIGEV_THREAD_ID instead of SIGEV_THREAD. > I.e. do not start timer handle thread on each timer action. > Start timer handle manually and wait for signal there. > This patch also fixes nasty bug with hanging timer threads, > which wants to access to timer pool and free shm on pool > destroy action. 
> > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> > --- > platform/linux-generic/odp_timer.c | 106 > ++++++++++++++++++++++++++++++------- > 1 file changed, 87 insertions(+), 19 deletions(-) > > diff --git a/platform/linux-generic/odp_timer.c > b/platform/linux-generic/odp_timer.c > index 1001af8..8bc8cc8 100644 > --- a/platform/linux-generic/odp_timer.c > +++ b/platform/linux-generic/odp_timer.c > @@ -27,6 +27,10 @@ > #include <stdlib.h> > #include <time.h> > #include <signal.h> > +#include <pthread.h> > +#include <unistd.h> > +#include <sys/syscall.h> > + > #include <odp/align.h> > #include <odp_align_internal.h> > #include <odp/atomic.h> > @@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s { > tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */ > odp_timer *timers; /* User pointer and queue handle (and lock) */ > odp_atomic_u32_t high_wm;/* High watermark of allocated timers */ > - odp_spinlock_t itimer_running; > odp_spinlock_t lock; > uint32_t num_alloc;/* Current number of allocated timers */ > uint32_t first_free;/* 0..max_timers-1 => free timer */ > @@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s { > odp_shm_t shm; > timer_t timerid; > int notify_overrun; > + pthread_t timer_thread; /* pthread_t of timer thread */ > + pid_t timer_thread_id; /* gettid() for timer thread */ > + int timer_thread_exit; /* request to exit for timer thread */ > } odp_timer_pool; > > #define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */ > @@ -254,26 +260,47 @@ static odp_timer_pool *odp_timer_pool_new( > } > tp->tp_idx = tp_idx; > odp_spinlock_init(&tp->lock); > - odp_spinlock_init(&tp->itimer_running); > timer_pool[tp_idx] = tp; > if (tp->param.clk_src == ODP_CLOCK_CPU) > itimer_init(tp); > return tp; > } > > +static void block_sigalarm(void) > +{ > + sigset_t sigset; > + > + sigemptyset(&sigset); > + sigaddset(&sigset, SIGALRM); > + sigprocmask(SIG_BLOCK, &sigset, NULL); > +} > + > +static void stop_timer_thread(odp_timer_pool *tp) > +{ > + 
int ret; > + > + tp->timer_thread_exit = 1; > + ret = pthread_join(tp->timer_thread, NULL); > + if (ret != 0) > + ODP_ABORT("unable to join thread, err %d\n", ret); > +} > + > static void odp_timer_pool_del(odp_timer_pool *tp) > { > odp_spinlock_lock(&tp->lock); > timer_pool[tp->tp_idx] = NULL; > - /* Wait for itimer thread to stop running */ > - odp_spinlock_lock(&tp->itimer_running); > + > + /* Stop timer triggering */ > + if (tp->param.clk_src == ODP_CLOCK_CPU) > + itimer_fini(tp); > + > + stop_timer_thread(tp); > + > if (tp->num_alloc != 0) { > /* It's a programming error to attempt to destroy a */ > /* timer pool which is still in use */ > ODP_ABORT("%s: timers in use\n", tp->name); > } > - if (tp->param.clk_src == ODP_CLOCK_CPU) > - itimer_fini(tp); > int rc = odp_shm_free(tp->shm); > if (rc != 0) > ODP_ABORT("Failed to free shared memory (%d)\n", rc); > @@ -632,10 +659,10 @@ static unsigned > odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick) > * Functions that use Linux/POSIX per-process timers and related > facilities > > *****************************************************************************/ > > -static void timer_notify(sigval_t sigval) > +static void timer_notify(odp_timer_pool *tp) > { > int overrun; > - odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr; > + int64_t prev_tick; > > if (tp->notify_overrun) { > overrun = timer_getoverrun(tp->timerid); > @@ -653,32 +680,72 @@ static void timer_notify(sigval_t sigval) > for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / sizeof(array[0])) > PREFETCH(&array[i]); > #endif > - uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); > - /* Attempt to acquire the lock, check if the old value was clear */ > - if (odp_spinlock_trylock(&tp->itimer_running)) { > - /* Scan timer array, looking for timers to expire */ > - (void)odp_timer_pool_expire(tp, prev_tick); > - odp_spinlock_unlock(&tp->itimer_running); > - } > + prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); > + > + /* Scan timer 
array, looking for timers to expire */ > + (void)odp_timer_pool_expire(tp, prev_tick); > + > /* Else skip scan of timers. cur_tick was updated and next itimer > * invocation will process older expiration ticks as well */ > } > > +static void *timer_thread(void *arg) > +{ > + odp_timer_pool *tp = (odp_timer_pool *)arg; > + sigset_t sigset; > + int ret; > + struct timespec tmo; > + siginfo_t si; > + > + tp->timer_thread_id = (pid_t)syscall(SYS_gettid); > + > + tmo.tv_sec = 0; > + tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100; > + > + sigemptyset(&sigset); > + sigaddset(&sigset, SIGALRM); > + > + while (1) { > + ret = sigtimedwait(&sigset, &si, &tmo); > + if (tp->timer_thread_exit) { > + tp->timer_thread_id = 0; > + return NULL; > + } > + if (ret == 0) > + timer_notify(tp); > + } > + > + return NULL; > +} > + > static void itimer_init(odp_timer_pool *tp) > { > struct sigevent sigev; > struct itimerspec ispec; > uint64_t res, sec, nsec; > + int ret; > > ODP_DBG("Creating POSIX timer for timer pool %s, period %" > PRIu64" ns\n", tp->name, tp->param.res_ns); > > + tp->timer_thread_id = 0; > + ret = pthread_create(&tp->timer_thread, NULL, timer_thread, tp); > + if (ret) > + ODP_ABORT("unable to create timer thread\n"); > + > + /* wait thread set tp->timer_thread_id */ > + do { > + sched_yield(); > + } while (tp->timer_thread_id == 0); > + > + /* Block sigalarm in current thread */ > + block_sigalarm(); > + > memset(&sigev, 0, sizeof(sigev)); > - memset(&ispec, 0, sizeof(ispec)); > - > - sigev.sigev_notify = SIGEV_THREAD; > - sigev.sigev_notify_function = timer_notify; > + sigev.sigev_notify = SIGEV_THREAD_ID; > sigev.sigev_value.sival_ptr = tp; > + sigev._sigev_un._tid = tp->timer_thread_id; > + sigev.sigev_signo = SIGALRM; > > if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) > ODP_ABORT("timer_create() returned error %s\n", > @@ -688,6 +755,7 @@ static void itimer_init(odp_timer_pool *tp) > sec = res / ODP_TIME_SEC_IN_NS; > nsec = res - sec * ODP_TIME_SEC_IN_NS; > > + 
memset(&ispec, 0, sizeof(ispec)); > ispec.it_interval.tv_sec = (time_t)sec; > ispec.it_interval.tv_nsec = (long)nsec; > ispec.it_value.tv_sec = (time_t)sec; > -- > 1.9.1 > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > https://lists.linaro.org/mailman/listinfo/lng-odp >
On 02/03/2016 18:48, Bill Fischofer wrote:
> Alarm clock
You should not see that; the SIGALRM signal is supposed to be masked. I suspect the test passes when run under gdb.
I will check what went wrong. I might have accidentally deleted something during patch clean-up.
Maxim.
btw, it's for api-next, I did not test it on master. But I think result will be the same. Maxim. On 02/03/2016 18:48, Bill Fischofer wrote: > I'm not sure this is working properly. Running > examples/timer/odp_timer_test without this patch I see this sort of > output: > > bill@Ubuntu15:~/linaro/odp/example/timer$ ./odp_timer_test > > ODP timer example starts > odp_pool.c:103:odp_pool_init_global(): > Pool init global > odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 > odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 > odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 > odp_pool.c:107:odp_pool_init_global(): > odp_queue.c:146:odp_queue_init_global():Queue init ... > odp_queue.c:170:odp_queue_init_global():done > odp_queue.c:171:odp_queue_init_global():Queue init global > odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 > odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 > odp_queue.c:176:odp_queue_init_global(): > odp_schedule.c:123:odp_schedule_init_global():Schedule init ... > odp_schedule.c:188:odp_schedule_init_global():done > > ODP system info > --------------- > ODP API version: 1.6.0 > CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz > CPU freq (hz): 3990762000 > Cache line size: 64 > Max CPU count: 4 > > odp_cpumask_task.c:44:odp_cpumask_default_worker(): > CPU0 will be used for both control and worker threads, > this will likely have a performance impact on the worker thread. 
> num worker threads: 4 > first CPU: 0 > cpu mask: 0xF > resolution: 10000 usec > min timeout: 0 usec > max timeout: 10000000 usec > period: 1000000 usec > timeouts: 30 > odp_timer.c:674:itimer_init():Creating POSIX timer for timer pool > timer_pool, period 10000000 ns > Shared memory > -------------- > page size: 4 kB > huge page size: 2048 kB > id name kB align huge addr > 0 odp_thread_globals 33 64 * 0x7f2f66400000 > 1 odp_buffer_pools 133 8512 * 0x7f2f66201e00 > 2 odp_queues 320 320 * 0x7f2f66000040 > 3 odp_scheduler 10 64 * 0x7f2f65e00000 > 4 odp_sched_pool 204 4096 * 0x7f2f65c00000 > 5 odp_pktio_entries 96 1536 * 0x7f2f65a00000 > 6 crypto_pool 15 64 * 0x7f2f65800000 > 7 shm_odp_cos_tbl 8 128 * 0x7f2f65600000 > 8 shm_odp_pmr_tbl 4 64 * 0x7f2f65400000 > 9 shm_odp_pmr_set_tbl 20 320 * 0x7f2f65200080 > 10 shm_test_globals 4 64 * 0x7f2f65000000 > 11 msg_pool 1876 4096 * 0x7f2f64e00000 > 12 timer_pool 0 64 0x7f2f67a05000 > Timer pool > ---------- > name: timer_pool > resolution: 10000000 ns > min tmo: 0 ticks > max tmo: 10000000000 ticks > > CPU freq 3990762000 Hz > Timer ticks vs nanoseconds: > 0 ns -> 0 ticks > 0 ticks -> 0 ns > 1 ns -> 0 ticks > 0 ticks -> 0 ns > 10 ns -> 0 ticks > 0 ticks -> 0 ns > 100 ns -> 0 ticks > 0 ticks -> 0 ns > 1000 ns -> 0 ticks > 0 ticks -> 0 ns > 10000 ns -> 0 ticks > 0 ticks -> 0 ns > 100000 ns -> 0 ticks > 0 ticks -> 0 ns > 1000000 ns -> 0 ticks > 0 ticks -> 0 ns > 10000000 ns -> 1 ticks > 1 ticks -> 10000000 ns > 100000000 ns -> 10 ticks > 10 ticks -> 100000000 ns > 1000000000 ns -> 100 ticks > 100 ticks -> 1000000000 ns > 10000000000 ns -> 1000 ticks > 1000 ticks -> 10000000000 ns > 100000000000 ns -> 10000 ticks > 10000 ticks -> 100000000000 ns > > Thread 1 starts on cpu 0 > Thread 2 starts on cpu 1 > Thread 3 starts on cpu 3 > Thread 4 starts on cpu 2 > odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, > 1000000000 ns > 
odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 > > ...lines omitted > > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 > odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 > Thread 2 exits > Thread 3 exits > Thread 4 exits > Thread 1 exits > ODP timer test complete > > ------------------------------------------ > However with this patch applied I get this: > > bill@Ubuntu15:~/linaro/maximtimer/example/timer$ ./odp_timer_test > > ODP timer example starts > odp_pool.c:103:odp_pool_init_global(): > Pool init global > odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 > odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 > odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 > 
odp_pool.c:107:odp_pool_init_global(): > odp_queue.c:146:odp_queue_init_global():Queue init ... > odp_queue.c:170:odp_queue_init_global():done > odp_queue.c:171:odp_queue_init_global():Queue init global > odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 > odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 > odp_queue.c:176:odp_queue_init_global(): > odp_schedule.c:123:odp_schedule_init_global():Schedule init ... > odp_schedule.c:188:odp_schedule_init_global():done > > ODP system info > --------------- > ODP API version: 1.6.0 > CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz > CPU freq (hz): 3990762000 > Cache line size: 64 > Max CPU count: 4 > > odp_cpumask_task.c:44:odp_cpumask_default_worker(): > CPU0 will be used for both control and worker threads, > this will likely have a performance impact on the worker thread. > num worker threads: 4 > first CPU: 0 > cpu mask: 0xF > resolution: 10000 usec > min timeout: 0 usec > max timeout: 10000000 usec > period: 1000000 usec > timeouts: 30 > odp_timer.c:729:itimer_init():Creating POSIX timer for timer pool > timer_pool, period 10000000 ns > Shared memory > -------------- > page size: 4 kB > huge page size: 2048 kB > id name kB align huge addr > 0 odp_thread_globals 33 64 * 0x7f02c2400000 > 1 odp_buffer_pools 133 8512 * 0x7f02c2201b80 > 2 odp_queues 320 320 * 0x7f02c20000c0 > 3 odp_scheduler 10 64 * 0x7f02c1e00000 > 4 odp_sched_pool 204 4096 * 0x7f02c1c00000 > 5 odp_pktio_entries 96 1536 * 0x7f02c1a00400 > 6 crypto_pool 15 64 * 0x7f02c1800000 > 7 shm_odp_cos_tbl 8 128 * 0x7f02c1600000 > 8 shm_odp_pmr_tbl 4 64 * 0x7f02c1400000 > 9 shm_odp_pmr_set_tbl 20 320 * 0x7f02c1200100 > 10 shm_test_globals 4 64 * 0x7f02c1000000 > 11 msg_pool 1876 4096 * 0x7f02c0e00000 > 12 timer_pool 0 64 0x7f02c39d8000 > Timer pool > ---------- > name: timer_pool > resolution: 10000000 ns > min tmo: 0 ticks > max tmo: 10000000000 ticks > > CPU freq 3990762000 Hz > Timer ticks vs nanoseconds: > 0 ns -> 0 ticks 
> 0 ticks -> 0 ns > 1 ns -> 0 ticks > 0 ticks -> 0 ns > 10 ns -> 0 ticks > 0 ticks -> 0 ns > 100 ns -> 0 ticks > 0 ticks -> 0 ns > 1000 ns -> 0 ticks > 0 ticks -> 0 ns > 10000 ns -> 0 ticks > 0 ticks -> 0 ns > 100000 ns -> 0 ticks > 0 ticks -> 0 ns > 1000000 ns -> 0 ticks > 0 ticks -> 0 ns > 10000000 ns -> 1 ticks > 1 ticks -> 10000000 ns > 100000000 ns -> 10 ticks > 10 ticks -> 100000000 ns > 1000000000 ns -> 100 ticks > 100 ticks -> 1000000000 ns > 10000000000 ns -> 1000 ticks > 1000 ticks -> 10000000000 ns > 100000000000 ns -> 10000 ticks > 10000 ticks -> 100000000000 ns > > Thread 1 starts on cpu 0 > Thread 2 starts on cpu 1 > Thread 3 starts on cpu 3 > Thread 4 starts on cpu 2 > odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 > odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts > odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, > 1000000000 ns > odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 > Alarm clock > bill@Ubuntu15:~/linaro/maximtimer/example/timer$ > > > On Wed, Feb 3, 2016 at 3:05 AM, Maxim Uvarov <maxim.uvarov@linaro.org > <mailto:maxim.uvarov@linaro.org>> wrote: > > Switch timer to use SIGEV_THREAD_ID instead of SIGEV_THREAD. > I.e. do not start timer handle thread on each timer action. > Start timer handle manually and wait for signal there. 
> This patch also fixes nasty bug with hanging timer threads, > which wants to access to timer pool and free shm on pool > destroy action. > > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org > <mailto:maxim.uvarov@linaro.org>> > --- > platform/linux-generic/odp_timer.c | 106 > ++++++++++++++++++++++++++++++------- > 1 file changed, 87 insertions(+), 19 deletions(-) > > diff --git a/platform/linux-generic/odp_timer.c > b/platform/linux-generic/odp_timer.c > index 1001af8..8bc8cc8 100644 > --- a/platform/linux-generic/odp_timer.c > +++ b/platform/linux-generic/odp_timer.c > @@ -27,6 +27,10 @@ > #include <stdlib.h> > #include <time.h> > #include <signal.h> > +#include <pthread.h> > +#include <unistd.h> > +#include <sys/syscall.h> > + > #include <odp/align.h> > #include <odp_align_internal.h> > #include <odp/atomic.h> > @@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s { > tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */ > odp_timer *timers; /* User pointer and queue handle (and > lock) */ > odp_atomic_u32_t high_wm;/* High watermark of allocated > timers */ > - odp_spinlock_t itimer_running; > odp_spinlock_t lock; > uint32_t num_alloc;/* Current number of allocated timers */ > uint32_t first_free;/* 0..max_timers-1 => free timer */ > @@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s { > odp_shm_t shm; > timer_t timerid; > int notify_overrun; > + pthread_t timer_thread; /* pthread_t of timer thread */ > + pid_t timer_thread_id; /* gettid() for timer thread */ > + int timer_thread_exit; /* request to exit for timer thread */ > } odp_timer_pool; > > #define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */ > @@ -254,26 +260,47 @@ static odp_timer_pool *odp_timer_pool_new( > } > tp->tp_idx = tp_idx; > odp_spinlock_init(&tp->lock); > - odp_spinlock_init(&tp->itimer_running); > timer_pool[tp_idx] = tp; > if (tp->param.clk_src == ODP_CLOCK_CPU) > itimer_init(tp); > return tp; > } > > +static void block_sigalarm(void) > +{ > + sigset_t sigset; > 
+ > + sigemptyset(&sigset); > + sigaddset(&sigset, SIGALRM); > + sigprocmask(SIG_BLOCK, &sigset, NULL); > +} > + > +static void stop_timer_thread(odp_timer_pool *tp) > +{ > + int ret; > + > + tp->timer_thread_exit = 1; > + ret = pthread_join(tp->timer_thread, NULL); > + if (ret != 0) > + ODP_ABORT("unable to join thread, err %d\n", ret); > +} > + > static void odp_timer_pool_del(odp_timer_pool *tp) > { > odp_spinlock_lock(&tp->lock); > timer_pool[tp->tp_idx] = NULL; > - /* Wait for itimer thread to stop running */ > - odp_spinlock_lock(&tp->itimer_running); > + > + /* Stop timer triggering */ > + if (tp->param.clk_src == ODP_CLOCK_CPU) > + itimer_fini(tp); > + > + stop_timer_thread(tp); > + > if (tp->num_alloc != 0) { > /* It's a programming error to attempt to destroy a */ > /* timer pool which is still in use */ > ODP_ABORT("%s: timers in use\n", tp->name); > } > - if (tp->param.clk_src == ODP_CLOCK_CPU) > - itimer_fini(tp); > int rc = odp_shm_free(tp->shm); > if (rc != 0) > ODP_ABORT("Failed to free shared memory (%d)\n", rc); > @@ -632,10 +659,10 @@ static unsigned > odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick) > * Functions that use Linux/POSIX per-process timers and related > facilities > *****************************************************************************/ > > -static void timer_notify(sigval_t sigval) > +static void timer_notify(odp_timer_pool *tp) > { > int overrun; > - odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr; > + int64_t prev_tick; > > if (tp->notify_overrun) { > overrun = timer_getoverrun(tp->timerid); > @@ -653,32 +680,72 @@ static void timer_notify(sigval_t sigval) > for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / > sizeof(array[0])) > PREFETCH(&array[i]); > #endif > - uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); > - /* Attempt to acquire the lock, check if the old value was > clear */ > - if (odp_spinlock_trylock(&tp->itimer_running)) { > - /* Scan timer array, looking for timers to expire */ > - 
(void)odp_timer_pool_expire(tp, prev_tick); > - odp_spinlock_unlock(&tp->itimer_running); > - } > + prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); > + > + /* Scan timer array, looking for timers to expire */ > + (void)odp_timer_pool_expire(tp, prev_tick); > + > /* Else skip scan of timers. cur_tick was updated and next > itimer > * invocation will process older expiration ticks as well */ > } > > +static void *timer_thread(void *arg) > +{ > + odp_timer_pool *tp = (odp_timer_pool *)arg; > + sigset_t sigset; > + int ret; > + struct timespec tmo; > + siginfo_t si; > + > + tp->timer_thread_id = (pid_t)syscall(SYS_gettid); > + > + tmo.tv_sec = 0; > + tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100; > + > + sigemptyset(&sigset); > + sigaddset(&sigset, SIGALRM); > + > + while (1) { > + ret = sigtimedwait(&sigset, &si, &tmo); > + if (tp->timer_thread_exit) { > + tp->timer_thread_id = 0; > + return NULL; > + } > + if (ret == 0) > + timer_notify(tp); > + } > + > + return NULL; > +} > + > static void itimer_init(odp_timer_pool *tp) > { > struct sigevent sigev; > struct itimerspec ispec; > uint64_t res, sec, nsec; > + int ret; > > ODP_DBG("Creating POSIX timer for timer pool %s, period %" > PRIu64" ns\n", tp->name, tp->param.res_ns); > > + tp->timer_thread_id = 0; > + ret = pthread_create(&tp->timer_thread, NULL, > timer_thread, tp); > + if (ret) > + ODP_ABORT("unable to create timer thread\n"); > + > + /* wait thread set tp->timer_thread_id */ > + do { > + sched_yield(); > + } while (tp->timer_thread_id == 0); > + > + /* Block sigalarm in current thread */ > + block_sigalarm(); > + > memset(&sigev, 0, sizeof(sigev)); > - memset(&ispec, 0, sizeof(ispec)); > - > - sigev.sigev_notify = SIGEV_THREAD; > - sigev.sigev_notify_function = timer_notify; > + sigev.sigev_notify = SIGEV_THREAD_ID; > sigev.sigev_value.sival_ptr = tp; > + sigev._sigev_un._tid = tp->timer_thread_id; > + sigev.sigev_signo = SIGALRM; > > if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) > 
ODP_ABORT("timer_create() returned error %s\n", > @@ -688,6 +755,7 @@ static void itimer_init(odp_timer_pool *tp) > sec = res / ODP_TIME_SEC_IN_NS; > nsec = res - sec * ODP_TIME_SEC_IN_NS; > > + memset(&ispec, 0, sizeof(ispec)); > ispec.it_interval.tv_sec = (time_t)sec; > ispec.it_interval.tv_nsec = (long)nsec; > ispec.it_value.tv_sec = (time_t)sec; > -- > 1.9.1 > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org <mailto:lng-odp@lists.linaro.org> > https://lists.linaro.org/mailman/listinfo/lng-odp > >
OK, however it was marked PATCH and addressed a bug, not an API change, so I assumed it was intended for master. On Wed, Feb 3, 2016 at 11:03 AM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > btw, it's for api-next, I did not test it on master. But I think result > will be the same. > > Maxim. > > On 02/03/2016 18:48, Bill Fischofer wrote: > >> I'm not sure this is working properly. Running >> examples/timer/odp_timer_test without this patch I see this sort of output: >> >> bill@Ubuntu15:~/linaro/odp/example/timer$ ./odp_timer_test >> >> ODP timer example starts >> odp_pool.c:103:odp_pool_init_global(): >> Pool init global >> odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 >> odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 >> odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 >> odp_pool.c:107:odp_pool_init_global(): >> odp_queue.c:146:odp_queue_init_global():Queue init ... >> odp_queue.c:170:odp_queue_init_global():done >> odp_queue.c:171:odp_queue_init_global():Queue init global >> odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 >> odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 >> odp_queue.c:176:odp_queue_init_global(): >> odp_schedule.c:123:odp_schedule_init_global():Schedule init ... >> odp_schedule.c:188:odp_schedule_init_global():done >> >> ODP system info >> --------------- >> ODP API version: 1.6.0 >> CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz >> CPU freq (hz): 3990762000 >> Cache line size: 64 >> Max CPU count: 4 >> >> odp_cpumask_task.c:44:odp_cpumask_default_worker(): >> CPU0 will be used for both control and worker threads, >> this will likely have a performance impact on the worker thread. 
>> num worker threads: 4 >> first CPU: 0 >> cpu mask: 0xF >> resolution: 10000 usec >> min timeout: 0 usec >> max timeout: 10000000 usec >> period: 1000000 usec >> timeouts: 30 >> odp_timer.c:674:itimer_init():Creating POSIX timer for timer pool >> timer_pool, period 10000000 ns >> Shared memory >> -------------- >> page size: 4 kB >> huge page size: 2048 kB >> id name kB align huge addr >> 0 odp_thread_globals 33 64 * 0x7f2f66400000 >> 1 odp_buffer_pools 133 8512 * 0x7f2f66201e00 >> 2 odp_queues 320 320 * 0x7f2f66000040 >> 3 odp_scheduler 10 64 * 0x7f2f65e00000 >> 4 odp_sched_pool 204 4096 * 0x7f2f65c00000 >> 5 odp_pktio_entries 96 1536 * 0x7f2f65a00000 >> 6 crypto_pool 15 64 * 0x7f2f65800000 >> 7 shm_odp_cos_tbl 8 128 * 0x7f2f65600000 >> 8 shm_odp_pmr_tbl 4 64 * 0x7f2f65400000 >> 9 shm_odp_pmr_set_tbl 20 320 * 0x7f2f65200080 >> 10 shm_test_globals 4 64 * 0x7f2f65000000 >> 11 msg_pool 1876 4096 * 0x7f2f64e00000 >> 12 timer_pool 0 64 0x7f2f67a05000 >> Timer pool >> ---------- >> name: timer_pool >> resolution: 10000000 ns >> min tmo: 0 ticks >> max tmo: 10000000000 ticks >> >> CPU freq 3990762000 Hz >> Timer ticks vs nanoseconds: >> 0 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10 ns -> 0 ticks >> 0 ticks -> 0 ns >> 100 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 100000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1000000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10000000 ns -> 1 ticks >> 1 ticks -> 10000000 ns >> 100000000 ns -> 10 ticks >> 10 ticks -> 100000000 ns >> 1000000000 ns -> 100 ticks >> 100 ticks -> 1000000000 ns >> 10000000000 ns -> 1000 ticks >> 1000 ticks -> 10000000000 ns >> 100000000000 ns -> 10000 ticks >> 10000 ticks -> 100000000000 ns >> >> Thread 1 starts on cpu 0 >> Thread 2 starts on cpu 1 >> Thread 3 starts on cpu 3 >> Thread 4 starts on cpu 2 >> odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [4] 
period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >> >> ...lines omitted >> >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >> Thread 2 exits >> Thread 3 exits >> Thread 4 exits >> Thread 1 exits >> ODP timer test complete >> >> ------------------------------------------ >> However with this patch applied I get this: >> >> bill@Ubuntu15:~/linaro/maximtimer/example/timer$ ./odp_timer_test >> >> ODP timer example starts >> odp_pool.c:103:odp_pool_init_global(): >> Pool init global >> odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 >> odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 >> 
odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 >> odp_pool.c:107:odp_pool_init_global(): >> odp_queue.c:146:odp_queue_init_global():Queue init ... >> odp_queue.c:170:odp_queue_init_global():done >> odp_queue.c:171:odp_queue_init_global():Queue init global >> odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 >> odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 >> odp_queue.c:176:odp_queue_init_global(): >> odp_schedule.c:123:odp_schedule_init_global():Schedule init ... >> odp_schedule.c:188:odp_schedule_init_global():done >> >> ODP system info >> --------------- >> ODP API version: 1.6.0 >> CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz >> CPU freq (hz): 3990762000 >> Cache line size: 64 >> Max CPU count: 4 >> >> odp_cpumask_task.c:44:odp_cpumask_default_worker(): >> CPU0 will be used for both control and worker threads, >> this will likely have a performance impact on the worker thread. >> num worker threads: 4 >> first CPU: 0 >> cpu mask: 0xF >> resolution: 10000 usec >> min timeout: 0 usec >> max timeout: 10000000 usec >> period: 1000000 usec >> timeouts: 30 >> odp_timer.c:729:itimer_init():Creating POSIX timer for timer pool >> timer_pool, period 10000000 ns >> Shared memory >> -------------- >> page size: 4 kB >> huge page size: 2048 kB >> id name kB align huge addr >> 0 odp_thread_globals 33 64 * 0x7f02c2400000 >> 1 odp_buffer_pools 133 8512 * 0x7f02c2201b80 >> 2 odp_queues 320 320 * 0x7f02c20000c0 >> 3 odp_scheduler 10 64 * 0x7f02c1e00000 >> 4 odp_sched_pool 204 4096 * 0x7f02c1c00000 >> 5 odp_pktio_entries 96 1536 * 0x7f02c1a00400 >> 6 crypto_pool 15 64 * 0x7f02c1800000 >> 7 shm_odp_cos_tbl 8 128 * 0x7f02c1600000 >> 8 shm_odp_pmr_tbl 4 64 * 0x7f02c1400000 >> 9 shm_odp_pmr_set_tbl 20 320 * 0x7f02c1200100 >> 10 shm_test_globals 4 64 * 0x7f02c1000000 >> 11 msg_pool 1876 4096 * 0x7f02c0e00000 >> 12 timer_pool 0 64 0x7f02c39d8000 >> Timer pool >> ---------- >> name: timer_pool >> resolution: 10000000 ns >> 
min tmo: 0 ticks >> max tmo: 10000000000 ticks >> >> CPU freq 3990762000 Hz >> Timer ticks vs nanoseconds: >> 0 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10 ns -> 0 ticks >> 0 ticks -> 0 ns >> 100 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 100000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 1000000 ns -> 0 ticks >> 0 ticks -> 0 ns >> 10000000 ns -> 1 ticks >> 1 ticks -> 10000000 ns >> 100000000 ns -> 10 ticks >> 10 ticks -> 100000000 ns >> 1000000000 ns -> 100 ticks >> 100 ticks -> 1000000000 ns >> 10000000000 ns -> 1000 ticks >> 1000 ticks -> 10000000000 ns >> 100000000000 ns -> 10000 ticks >> 10000 ticks -> 100000000000 ns >> >> Thread 1 starts on cpu 0 >> Thread 2 starts on cpu 1 >> Thread 3 starts on cpu 3 >> Thread 4 starts on cpu 2 >> odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 >> odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts >> odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, >> 1000000000 ns >> odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 >> Alarm clock >> bill@Ubuntu15:~/linaro/maximtimer/example/timer$ >> >> >> On Wed, Feb 3, 2016 at 3:05 AM, Maxim Uvarov <maxim.uvarov@linaro.org >> <mailto:maxim.uvarov@linaro.org>> wrote: >> >> Switch timer to use SIGEV_THREAD_ID instead of SIGEV_THREAD. >> I.e. 
do not start timer handle thread on each timer action. >> Start timer handle manually and wait for signal there. >> This patch also fixes nasty bug with hanging timer threads, >> which wants to access to timer pool and free shm on pool >> destroy action. >> >> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org >> <mailto:maxim.uvarov@linaro.org>> >> >> --- >> platform/linux-generic/odp_timer.c | 106 >> ++++++++++++++++++++++++++++++------- >> 1 file changed, 87 insertions(+), 19 deletions(-) >> >> diff --git a/platform/linux-generic/odp_timer.c >> b/platform/linux-generic/odp_timer.c >> index 1001af8..8bc8cc8 100644 >> --- a/platform/linux-generic/odp_timer.c >> +++ b/platform/linux-generic/odp_timer.c >> @@ -27,6 +27,10 @@ >> #include <stdlib.h> >> #include <time.h> >> #include <signal.h> >> +#include <pthread.h> >> +#include <unistd.h> >> +#include <sys/syscall.h> >> + >> #include <odp/align.h> >> #include <odp_align_internal.h> >> #include <odp/atomic.h> >> @@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s { >> tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */ >> odp_timer *timers; /* User pointer and queue handle (and >> lock) */ >> odp_atomic_u32_t high_wm;/* High watermark of allocated >> timers */ >> - odp_spinlock_t itimer_running; >> odp_spinlock_t lock; >> uint32_t num_alloc;/* Current number of allocated timers */ >> uint32_t first_free;/* 0..max_timers-1 => free timer */ >> @@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s { >> odp_shm_t shm; >> timer_t timerid; >> int notify_overrun; >> + pthread_t timer_thread; /* pthread_t of timer thread */ >> + pid_t timer_thread_id; /* gettid() for timer thread */ >> + int timer_thread_exit; /* request to exit for timer thread */ >> } odp_timer_pool; >> >> #define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */ >> @@ -254,26 +260,47 @@ static odp_timer_pool *odp_timer_pool_new( >> } >> tp->tp_idx = tp_idx; >> odp_spinlock_init(&tp->lock); >> - odp_spinlock_init(&tp->itimer_running); >> 
timer_pool[tp_idx] = tp; >> if (tp->param.clk_src == ODP_CLOCK_CPU) >> itimer_init(tp); >> return tp; >> } >> >> +static void block_sigalarm(void) >> +{ >> + sigset_t sigset; >> + >> + sigemptyset(&sigset); >> + sigaddset(&sigset, SIGALRM); >> + sigprocmask(SIG_BLOCK, &sigset, NULL); >> +} >> + >> +static void stop_timer_thread(odp_timer_pool *tp) >> +{ >> + int ret; >> + >> + tp->timer_thread_exit = 1; >> + ret = pthread_join(tp->timer_thread, NULL); >> + if (ret != 0) >> + ODP_ABORT("unable to join thread, err %d\n", ret); >> +} >> + >> static void odp_timer_pool_del(odp_timer_pool *tp) >> { >> odp_spinlock_lock(&tp->lock); >> timer_pool[tp->tp_idx] = NULL; >> - /* Wait for itimer thread to stop running */ >> - odp_spinlock_lock(&tp->itimer_running); >> + >> + /* Stop timer triggering */ >> + if (tp->param.clk_src == ODP_CLOCK_CPU) >> + itimer_fini(tp); >> + >> + stop_timer_thread(tp); >> + >> if (tp->num_alloc != 0) { >> /* It's a programming error to attempt to destroy a */ >> /* timer pool which is still in use */ >> ODP_ABORT("%s: timers in use\n", tp->name); >> } >> - if (tp->param.clk_src == ODP_CLOCK_CPU) >> - itimer_fini(tp); >> int rc = odp_shm_free(tp->shm); >> if (rc != 0) >> ODP_ABORT("Failed to free shared memory (%d)\n", rc); >> @@ -632,10 +659,10 @@ static unsigned >> odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick) >> * Functions that use Linux/POSIX per-process timers and related >> facilities >> >> *****************************************************************************/ >> >> -static void timer_notify(sigval_t sigval) >> +static void timer_notify(odp_timer_pool *tp) >> { >> int overrun; >> - odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr; >> + int64_t prev_tick; >> >> if (tp->notify_overrun) { >> overrun = timer_getoverrun(tp->timerid); >> @@ -653,32 +680,72 @@ static void timer_notify(sigval_t sigval) >> for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / >> sizeof(array[0])) >> PREFETCH(&array[i]); >> #endif >> - uint64_t 
prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); >> - /* Attempt to acquire the lock, check if the old value was >> clear */ >> - if (odp_spinlock_trylock(&tp->itimer_running)) { >> - /* Scan timer array, looking for timers to expire */ >> - (void)odp_timer_pool_expire(tp, prev_tick); >> - odp_spinlock_unlock(&tp->itimer_running); >> - } >> + prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); >> + >> + /* Scan timer array, looking for timers to expire */ >> + (void)odp_timer_pool_expire(tp, prev_tick); >> + >> /* Else skip scan of timers. cur_tick was updated and next >> itimer >> * invocation will process older expiration ticks as well */ >> } >> >> +static void *timer_thread(void *arg) >> +{ >> + odp_timer_pool *tp = (odp_timer_pool *)arg; >> + sigset_t sigset; >> + int ret; >> + struct timespec tmo; >> + siginfo_t si; >> + >> + tp->timer_thread_id = (pid_t)syscall(SYS_gettid); >> + >> + tmo.tv_sec = 0; >> + tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100; >> + >> + sigemptyset(&sigset); >> + sigaddset(&sigset, SIGALRM); >> + >> + while (1) { >> + ret = sigtimedwait(&sigset, &si, &tmo); >> + if (tp->timer_thread_exit) { >> + tp->timer_thread_id = 0; >> + return NULL; >> + } >> + if (ret == 0) >> + timer_notify(tp); >> + } >> + >> + return NULL; >> +} >> + >> static void itimer_init(odp_timer_pool *tp) >> { >> struct sigevent sigev; >> struct itimerspec ispec; >> uint64_t res, sec, nsec; >> + int ret; >> >> ODP_DBG("Creating POSIX timer for timer pool %s, period %" >> PRIu64" ns\n", tp->name, tp->param.res_ns); >> >> + tp->timer_thread_id = 0; >> + ret = pthread_create(&tp->timer_thread, NULL, >> timer_thread, tp); >> + if (ret) >> + ODP_ABORT("unable to create timer thread\n"); >> + >> + /* wait thread set tp->timer_thread_id */ >> + do { >> + sched_yield(); >> + } while (tp->timer_thread_id == 0); >> + >> + /* Block sigalarm in current thread */ >> + block_sigalarm(); >> + >> memset(&sigev, 0, sizeof(sigev)); >> - memset(&ispec, 0, sizeof(ispec)); >> - >> - 
sigev.sigev_notify = SIGEV_THREAD; >> - sigev.sigev_notify_function = timer_notify; >> + sigev.sigev_notify = SIGEV_THREAD_ID; >> sigev.sigev_value.sival_ptr = tp; >> + sigev._sigev_un._tid = tp->timer_thread_id; >> + sigev.sigev_signo = SIGALRM; >> >> if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) >> ODP_ABORT("timer_create() returned error %s\n", >> @@ -688,6 +755,7 @@ static void itimer_init(odp_timer_pool *tp) >> sec = res / ODP_TIME_SEC_IN_NS; >> nsec = res - sec * ODP_TIME_SEC_IN_NS; >> >> + memset(&ispec, 0, sizeof(ispec)); >> ispec.it_interval.tv_sec = (time_t)sec; >> ispec.it_interval.tv_nsec = (long)nsec; >> ispec.it_value.tv_sec = (time_t)sec; >> -- >> 1.9.1 >> >> _______________________________________________ >> lng-odp mailing list >> lng-odp@lists.linaro.org <mailto:lng-odp@lists.linaro.org> >> https://lists.linaro.org/mailman/listinfo/lng-odp >> >> >> >
I just confirmed I get the same behavior if I apply and run the patch against api-next. On Wed, Feb 3, 2016 at 11:04 AM, Bill Fischofer <bill.fischofer@linaro.org> wrote: > OK, however it was marked PATCH and addressed a bug, not an API change, so > I assumed it was intended for master. > > On Wed, Feb 3, 2016 at 11:03 AM, Maxim Uvarov <maxim.uvarov@linaro.org> > wrote: > >> btw, it's for api-next, I did not test it on master. But I think result >> will be the same. >> >> Maxim. >> >> On 02/03/2016 18:48, Bill Fischofer wrote: >> >>> I'm not sure this is working properly. Running >>> examples/timer/odp_timer_test without this patch I see this sort of output: >>> >>> bill@Ubuntu15:~/linaro/odp/example/timer$ ./odp_timer_test >>> >>> ODP timer example starts >>> odp_pool.c:103:odp_pool_init_global(): >>> Pool init global >>> odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 >>> odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 >>> odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 >>> odp_pool.c:107:odp_pool_init_global(): >>> odp_queue.c:146:odp_queue_init_global():Queue init ... >>> odp_queue.c:170:odp_queue_init_global():done >>> odp_queue.c:171:odp_queue_init_global():Queue init global >>> odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 >>> odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 >>> odp_queue.c:176:odp_queue_init_global(): >>> odp_schedule.c:123:odp_schedule_init_global():Schedule init ... >>> odp_schedule.c:188:odp_schedule_init_global():done >>> >>> ODP system info >>> --------------- >>> ODP API version: 1.6.0 >>> CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz >>> CPU freq (hz): 3990762000 >>> Cache line size: 64 >>> Max CPU count: 4 >>> >>> odp_cpumask_task.c:44:odp_cpumask_default_worker(): >>> CPU0 will be used for both control and worker threads, >>> this will likely have a performance impact on the worker thread. 
>>> num worker threads: 4 >>> first CPU: 0 >>> cpu mask: 0xF >>> resolution: 10000 usec >>> min timeout: 0 usec >>> max timeout: 10000000 usec >>> period: 1000000 usec >>> timeouts: 30 >>> odp_timer.c:674:itimer_init():Creating POSIX timer for timer pool >>> timer_pool, period 10000000 ns >>> Shared memory >>> -------------- >>> page size: 4 kB >>> huge page size: 2048 kB >>> id name kB align huge addr >>> 0 odp_thread_globals 33 64 * 0x7f2f66400000 >>> 1 odp_buffer_pools 133 8512 * 0x7f2f66201e00 >>> 2 odp_queues 320 320 * 0x7f2f66000040 >>> 3 odp_scheduler 10 64 * 0x7f2f65e00000 >>> 4 odp_sched_pool 204 4096 * 0x7f2f65c00000 >>> 5 odp_pktio_entries 96 1536 * 0x7f2f65a00000 >>> 6 crypto_pool 15 64 * 0x7f2f65800000 >>> 7 shm_odp_cos_tbl 8 128 * 0x7f2f65600000 >>> 8 shm_odp_pmr_tbl 4 64 * 0x7f2f65400000 >>> 9 shm_odp_pmr_set_tbl 20 320 * 0x7f2f65200080 >>> 10 shm_test_globals 4 64 * 0x7f2f65000000 >>> 11 msg_pool 1876 4096 * 0x7f2f64e00000 >>> 12 timer_pool 0 64 0x7f2f67a05000 >>> Timer pool >>> ---------- >>> name: timer_pool >>> resolution: 10000000 ns >>> min tmo: 0 ticks >>> max tmo: 10000000000 ticks >>> >>> CPU freq 3990762000 Hz >>> Timer ticks vs nanoseconds: >>> 0 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 100 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 100000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1000000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10000000 ns -> 1 ticks >>> 1 ticks -> 10000000 ns >>> 100000000 ns -> 10 ticks >>> 10 ticks -> 100000000 ns >>> 1000000000 ns -> 100 ticks >>> 100 ticks -> 1000000000 ns >>> 10000000000 ns -> 1000 ticks >>> 1000 ticks -> 10000000000 ns >>> 100000000000 ns -> 10000 ticks >>> 10000 ticks -> 100000000000 ns >>> >>> Thread 1 starts on cpu 0 >>> Thread 2 starts on cpu 1 >>> Thread 3 starts on cpu 3 >>> Thread 4 starts on cpu 2 >>> 
odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 100 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 200 >>> >>> ...lines omitted >>> >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >>> odp_timer_test.c:171:test_abs_timeouts(): [1] timeout, tick 3000 >>> Thread 2 exits >>> Thread 3 exits >>> Thread 4 exits >>> Thread 1 exits >>> ODP timer test complete >>> >>> ------------------------------------------ >>> However with this patch applied I get this: >>> >>> bill@Ubuntu15:~/linaro/maximtimer/example/timer$ ./odp_timer_test >>> >>> ODP timer example starts >>> odp_pool.c:103:odp_pool_init_global(): >>> Pool init 
global >>> odp_pool.c:104:odp_pool_init_global(): pool_entry_s size 8512 >>> odp_pool.c:105:odp_pool_init_global(): pool_entry_t size 8512 >>> odp_pool.c:106:odp_pool_init_global(): odp_buffer_hdr_t size 168 >>> odp_pool.c:107:odp_pool_init_global(): >>> odp_queue.c:146:odp_queue_init_global():Queue init ... >>> odp_queue.c:170:odp_queue_init_global():done >>> odp_queue.c:171:odp_queue_init_global():Queue init global >>> odp_queue.c:173:odp_queue_init_global(): struct queue_entry_s size 320 >>> odp_queue.c:175:odp_queue_init_global(): queue_entry_t size 320 >>> odp_queue.c:176:odp_queue_init_global(): >>> odp_schedule.c:123:odp_schedule_init_global():Schedule init ... >>> odp_schedule.c:188:odp_schedule_init_global():done >>> >>> ODP system info >>> --------------- >>> ODP API version: 1.6.0 >>> CPU model: Intel(R) Core(TM) i7-4790K CPU @ 4.00GHz >>> CPU freq (hz): 3990762000 >>> Cache line size: 64 >>> Max CPU count: 4 >>> >>> odp_cpumask_task.c:44:odp_cpumask_default_worker(): >>> CPU0 will be used for both control and worker threads, >>> this will likely have a performance impact on the worker thread. 
>>> num worker threads: 4 >>> first CPU: 0 >>> cpu mask: 0xF >>> resolution: 10000 usec >>> min timeout: 0 usec >>> max timeout: 10000000 usec >>> period: 1000000 usec >>> timeouts: 30 >>> odp_timer.c:729:itimer_init():Creating POSIX timer for timer pool >>> timer_pool, period 10000000 ns >>> Shared memory >>> -------------- >>> page size: 4 kB >>> huge page size: 2048 kB >>> id name kB align huge addr >>> 0 odp_thread_globals 33 64 * 0x7f02c2400000 >>> 1 odp_buffer_pools 133 8512 * 0x7f02c2201b80 >>> 2 odp_queues 320 320 * 0x7f02c20000c0 >>> 3 odp_scheduler 10 64 * 0x7f02c1e00000 >>> 4 odp_sched_pool 204 4096 * 0x7f02c1c00000 >>> 5 odp_pktio_entries 96 1536 * 0x7f02c1a00400 >>> 6 crypto_pool 15 64 * 0x7f02c1800000 >>> 7 shm_odp_cos_tbl 8 128 * 0x7f02c1600000 >>> 8 shm_odp_pmr_tbl 4 64 * 0x7f02c1400000 >>> 9 shm_odp_pmr_set_tbl 20 320 * 0x7f02c1200100 >>> 10 shm_test_globals 4 64 * 0x7f02c1000000 >>> 11 msg_pool 1876 4096 * 0x7f02c0e00000 >>> 12 timer_pool 0 64 0x7f02c39d8000 >>> Timer pool >>> ---------- >>> name: timer_pool >>> resolution: 10000000 ns >>> min tmo: 0 ticks >>> max tmo: 10000000000 ticks >>> >>> CPU freq 3990762000 Hz >>> Timer ticks vs nanoseconds: >>> 0 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 100 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 100000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 1000000 ns -> 0 ticks >>> 0 ticks -> 0 ns >>> 10000000 ns -> 1 ticks >>> 1 ticks -> 10000000 ns >>> 100000000 ns -> 10 ticks >>> 10 ticks -> 100000000 ns >>> 1000000000 ns -> 100 ticks >>> 100 ticks -> 1000000000 ns >>> 10000000000 ns -> 1000 ticks >>> 1000 ticks -> 10000000000 ns >>> 100000000000 ns -> 10000 ticks >>> 10000 ticks -> 100000000000 ns >>> >>> Thread 1 starts on cpu 0 >>> Thread 2 starts on cpu 1 >>> Thread 3 starts on cpu 3 >>> Thread 4 starts on cpu 2 >>> 
odp_timer_test.c:94:test_abs_timeouts(): [3] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [3] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [3] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [2] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [2] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [2] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [4] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [4] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [4] current tick 0 >>> odp_timer_test.c:94:test_abs_timeouts(): [1] test_timeouts >>> odp_timer_test.c:102:test_abs_timeouts(): [1] period 100 ticks, >>> 1000000000 ns >>> odp_timer_test.c:105:test_abs_timeouts(): [1] current tick 0 >>> Alarm clock >>> bill@Ubuntu15:~/linaro/maximtimer/example/timer$ >>> >>> >>> On Wed, Feb 3, 2016 at 3:05 AM, Maxim Uvarov <maxim.uvarov@linaro.org >>> <mailto:maxim.uvarov@linaro.org>> wrote: >>> >>> Switch timer to use SIGEV_THREAD_ID instead of SIGEV_THREAD. >>> I.e. do not start timer handle thread on each timer action. >>> Start timer handle manually and wait for signal there. >>> This patch also fixes nasty bug with hanging timer threads, >>> which wants to access to timer pool and free shm on pool >>> destroy action. 
>>> >>> Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org >>> <mailto:maxim.uvarov@linaro.org>> >>> >>> --- >>> platform/linux-generic/odp_timer.c | 106 >>> ++++++++++++++++++++++++++++++------- >>> 1 file changed, 87 insertions(+), 19 deletions(-) >>> >>> diff --git a/platform/linux-generic/odp_timer.c >>> b/platform/linux-generic/odp_timer.c >>> index 1001af8..8bc8cc8 100644 >>> --- a/platform/linux-generic/odp_timer.c >>> +++ b/platform/linux-generic/odp_timer.c >>> @@ -27,6 +27,10 @@ >>> #include <stdlib.h> >>> #include <time.h> >>> #include <signal.h> >>> +#include <pthread.h> >>> +#include <unistd.h> >>> +#include <sys/syscall.h> >>> + >>> #include <odp/align.h> >>> #include <odp_align_internal.h> >>> #include <odp/atomic.h> >>> @@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s { >>> tick_buf_t *tick_buf; /* Expiration tick and timeout buffer >>> */ >>> odp_timer *timers; /* User pointer and queue handle (and >>> lock) */ >>> odp_atomic_u32_t high_wm;/* High watermark of allocated >>> timers */ >>> - odp_spinlock_t itimer_running; >>> odp_spinlock_t lock; >>> uint32_t num_alloc;/* Current number of allocated timers */ >>> uint32_t first_free;/* 0..max_timers-1 => free timer */ >>> @@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s { >>> odp_shm_t shm; >>> timer_t timerid; >>> int notify_overrun; >>> + pthread_t timer_thread; /* pthread_t of timer thread */ >>> + pid_t timer_thread_id; /* gettid() for timer thread */ >>> + int timer_thread_exit; /* request to exit for timer thread */ >>> } odp_timer_pool; >>> >>> #define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */ >>> @@ -254,26 +260,47 @@ static odp_timer_pool *odp_timer_pool_new( >>> } >>> tp->tp_idx = tp_idx; >>> odp_spinlock_init(&tp->lock); >>> - odp_spinlock_init(&tp->itimer_running); >>> timer_pool[tp_idx] = tp; >>> if (tp->param.clk_src == ODP_CLOCK_CPU) >>> itimer_init(tp); >>> return tp; >>> } >>> >>> +static void block_sigalarm(void) >>> +{ >>> + sigset_t sigset; >>> + >>> + 
sigemptyset(&sigset); >>> + sigaddset(&sigset, SIGALRM); >>> + sigprocmask(SIG_BLOCK, &sigset, NULL); >>> +} >>> + >>> +static void stop_timer_thread(odp_timer_pool *tp) >>> +{ >>> + int ret; >>> + >>> + tp->timer_thread_exit = 1; >>> + ret = pthread_join(tp->timer_thread, NULL); >>> + if (ret != 0) >>> + ODP_ABORT("unable to join thread, err %d\n", ret); >>> +} >>> + >>> static void odp_timer_pool_del(odp_timer_pool *tp) >>> { >>> odp_spinlock_lock(&tp->lock); >>> timer_pool[tp->tp_idx] = NULL; >>> - /* Wait for itimer thread to stop running */ >>> - odp_spinlock_lock(&tp->itimer_running); >>> + >>> + /* Stop timer triggering */ >>> + if (tp->param.clk_src == ODP_CLOCK_CPU) >>> + itimer_fini(tp); >>> + >>> + stop_timer_thread(tp); >>> + >>> if (tp->num_alloc != 0) { >>> /* It's a programming error to attempt to destroy a >>> */ >>> /* timer pool which is still in use */ >>> ODP_ABORT("%s: timers in use\n", tp->name); >>> } >>> - if (tp->param.clk_src == ODP_CLOCK_CPU) >>> - itimer_fini(tp); >>> int rc = odp_shm_free(tp->shm); >>> if (rc != 0) >>> ODP_ABORT("Failed to free shared memory (%d)\n", rc); >>> @@ -632,10 +659,10 @@ static unsigned >>> odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick) >>> * Functions that use Linux/POSIX per-process timers and related >>> facilities >>> >>> *****************************************************************************/ >>> >>> -static void timer_notify(sigval_t sigval) >>> +static void timer_notify(odp_timer_pool *tp) >>> { >>> int overrun; >>> - odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr; >>> + int64_t prev_tick; >>> >>> if (tp->notify_overrun) { >>> overrun = timer_getoverrun(tp->timerid); >>> @@ -653,32 +680,72 @@ static void timer_notify(sigval_t sigval) >>> for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / >>> sizeof(array[0])) >>> PREFETCH(&array[i]); >>> #endif >>> - uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); >>> - /* Attempt to acquire the lock, check if the old value was >>> 
clear */ >>> - if (odp_spinlock_trylock(&tp->itimer_running)) { >>> - /* Scan timer array, looking for timers to expire */ >>> - (void)odp_timer_pool_expire(tp, prev_tick); >>> - odp_spinlock_unlock(&tp->itimer_running); >>> - } >>> + prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); >>> + >>> + /* Scan timer array, looking for timers to expire */ >>> + (void)odp_timer_pool_expire(tp, prev_tick); >>> + >>> /* Else skip scan of timers. cur_tick was updated and next >>> itimer >>> * invocation will process older expiration ticks as well */ >>> } >>> >>> +static void *timer_thread(void *arg) >>> +{ >>> + odp_timer_pool *tp = (odp_timer_pool *)arg; >>> + sigset_t sigset; >>> + int ret; >>> + struct timespec tmo; >>> + siginfo_t si; >>> + >>> + tp->timer_thread_id = (pid_t)syscall(SYS_gettid); >>> + >>> + tmo.tv_sec = 0; >>> + tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100; >>> + >>> + sigemptyset(&sigset); >>> + sigaddset(&sigset, SIGALRM); >>> + >>> + while (1) { >>> + ret = sigtimedwait(&sigset, &si, &tmo); >>> + if (tp->timer_thread_exit) { >>> + tp->timer_thread_id = 0; >>> + return NULL; >>> + } >>> + if (ret == 0) >>> + timer_notify(tp); >>> + } >>> + >>> + return NULL; >>> +} >>> + >>> static void itimer_init(odp_timer_pool *tp) >>> { >>> struct sigevent sigev; >>> struct itimerspec ispec; >>> uint64_t res, sec, nsec; >>> + int ret; >>> >>> ODP_DBG("Creating POSIX timer for timer pool %s, period %" >>> PRIu64" ns\n", tp->name, tp->param.res_ns); >>> >>> + tp->timer_thread_id = 0; >>> + ret = pthread_create(&tp->timer_thread, NULL, >>> timer_thread, tp); >>> + if (ret) >>> + ODP_ABORT("unable to create timer thread\n"); >>> + >>> + /* wait thread set tp->timer_thread_id */ >>> + do { >>> + sched_yield(); >>> + } while (tp->timer_thread_id == 0); >>> + >>> + /* Block sigalarm in current thread */ >>> + block_sigalarm(); >>> + >>> memset(&sigev, 0, sizeof(sigev)); >>> - memset(&ispec, 0, sizeof(ispec)); >>> - >>> - sigev.sigev_notify = SIGEV_THREAD; >>> - 
sigev.sigev_notify_function = timer_notify; >>> + sigev.sigev_notify = SIGEV_THREAD_ID; >>> sigev.sigev_value.sival_ptr = tp; >>> + sigev._sigev_un._tid = tp->timer_thread_id; >>> + sigev.sigev_signo = SIGALRM; >>> >>> if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) >>> ODP_ABORT("timer_create() returned error %s\n", >>> @@ -688,6 +755,7 @@ static void itimer_init(odp_timer_pool *tp) >>> sec = res / ODP_TIME_SEC_IN_NS; >>> nsec = res - sec * ODP_TIME_SEC_IN_NS; >>> >>> + memset(&ispec, 0, sizeof(ispec)); >>> ispec.it_interval.tv_sec = (time_t)sec; >>> ispec.it_interval.tv_nsec = (long)nsec; >>> ispec.it_value.tv_sec = (time_t)sec; >>> -- >>> 1.9.1 >>> >>> _______________________________________________ >>> lng-odp mailing list >>> lng-odp@lists.linaro.org <mailto:lng-odp@lists.linaro.org> >>> https://lists.linaro.org/mailman/listinfo/lng-odp >>> >>> >>> >> >
diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index 1001af8..8bc8cc8 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -27,6 +27,10 @@ #include <stdlib.h> #include <time.h> #include <signal.h> +#include <pthread.h> +#include <unistd.h> +#include <sys/syscall.h> + #include <odp/align.h> #include <odp_align_internal.h> #include <odp/atomic.h> @@ -159,7 +163,6 @@ typedef struct odp_timer_pool_s { tick_buf_t *tick_buf; /* Expiration tick and timeout buffer */ odp_timer *timers; /* User pointer and queue handle (and lock) */ odp_atomic_u32_t high_wm;/* High watermark of allocated timers */ - odp_spinlock_t itimer_running; odp_spinlock_t lock; uint32_t num_alloc;/* Current number of allocated timers */ uint32_t first_free;/* 0..max_timers-1 => free timer */ @@ -169,6 +172,9 @@ typedef struct odp_timer_pool_s { odp_shm_t shm; timer_t timerid; int notify_overrun; + pthread_t timer_thread; /* pthread_t of timer thread */ + pid_t timer_thread_id; /* gettid() for timer thread */ + int timer_thread_exit; /* request to exit for timer thread */ } odp_timer_pool; #define MAX_TIMER_POOLS 255 /* Leave one for ODP_TIMER_INVALID */ @@ -254,26 +260,47 @@ static odp_timer_pool *odp_timer_pool_new( } tp->tp_idx = tp_idx; odp_spinlock_init(&tp->lock); - odp_spinlock_init(&tp->itimer_running); timer_pool[tp_idx] = tp; if (tp->param.clk_src == ODP_CLOCK_CPU) itimer_init(tp); return tp; } +static void block_sigalarm(void) +{ + sigset_t sigset; + + sigemptyset(&sigset); + sigaddset(&sigset, SIGALRM); + sigprocmask(SIG_BLOCK, &sigset, NULL); +} + +static void stop_timer_thread(odp_timer_pool *tp) +{ + int ret; + + tp->timer_thread_exit = 1; + ret = pthread_join(tp->timer_thread, NULL); + if (ret != 0) + ODP_ABORT("unable to join thread, err %d\n", ret); +} + static void odp_timer_pool_del(odp_timer_pool *tp) { odp_spinlock_lock(&tp->lock); timer_pool[tp->tp_idx] = NULL; - /* Wait for itimer thread to stop running 
*/ - odp_spinlock_lock(&tp->itimer_running); + + /* Stop timer triggering */ + if (tp->param.clk_src == ODP_CLOCK_CPU) + itimer_fini(tp); + + stop_timer_thread(tp); + if (tp->num_alloc != 0) { /* It's a programming error to attempt to destroy a */ /* timer pool which is still in use */ ODP_ABORT("%s: timers in use\n", tp->name); } - if (tp->param.clk_src == ODP_CLOCK_CPU) - itimer_fini(tp); int rc = odp_shm_free(tp->shm); if (rc != 0) ODP_ABORT("Failed to free shared memory (%d)\n", rc); @@ -632,10 +659,10 @@ static unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, uint64_t tick) * Functions that use Linux/POSIX per-process timers and related facilities *****************************************************************************/ -static void timer_notify(sigval_t sigval) +static void timer_notify(odp_timer_pool *tp) { int overrun; - odp_timer_pool *tp = (odp_timer_pool *)sigval.sival_ptr; + int64_t prev_tick; if (tp->notify_overrun) { overrun = timer_getoverrun(tp->timerid); @@ -653,32 +680,72 @@ static void timer_notify(sigval_t sigval) for (i = 0; i < 32; i += ODP_CACHE_LINE_SIZE / sizeof(array[0])) PREFETCH(&array[i]); #endif - uint64_t prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); - /* Attempt to acquire the lock, check if the old value was clear */ - if (odp_spinlock_trylock(&tp->itimer_running)) { - /* Scan timer array, looking for timers to expire */ - (void)odp_timer_pool_expire(tp, prev_tick); - odp_spinlock_unlock(&tp->itimer_running); - } + prev_tick = odp_atomic_fetch_inc_u64(&tp->cur_tick); + + /* Scan timer array, looking for timers to expire */ + (void)odp_timer_pool_expire(tp, prev_tick); + /* Else skip scan of timers. 
cur_tick was updated and next itimer * invocation will process older expiration ticks as well */ } +static void *timer_thread(void *arg) +{ + odp_timer_pool *tp = (odp_timer_pool *)arg; + sigset_t sigset; + int ret; + struct timespec tmo; + siginfo_t si; + + tp->timer_thread_id = (pid_t)syscall(SYS_gettid); + + tmo.tv_sec = 0; + tmo.tv_nsec = ODP_TIME_MSEC_IN_NS * 100; + + sigemptyset(&sigset); + sigaddset(&sigset, SIGALRM); + + while (1) { + ret = sigtimedwait(&sigset, &si, &tmo); + if (tp->timer_thread_exit) { + tp->timer_thread_id = 0; + return NULL; + } + if (ret == 0) + timer_notify(tp); + } + + return NULL; +} + static void itimer_init(odp_timer_pool *tp) { struct sigevent sigev; struct itimerspec ispec; uint64_t res, sec, nsec; + int ret; ODP_DBG("Creating POSIX timer for timer pool %s, period %" PRIu64" ns\n", tp->name, tp->param.res_ns); + tp->timer_thread_id = 0; + ret = pthread_create(&tp->timer_thread, NULL, timer_thread, tp); + if (ret) + ODP_ABORT("unable to create timer thread\n"); + + /* wait thread set tp->timer_thread_id */ + do { + sched_yield(); + } while (tp->timer_thread_id == 0); + + /* Block sigalarm in current thread */ + block_sigalarm(); + memset(&sigev, 0, sizeof(sigev)); - memset(&ispec, 0, sizeof(ispec)); - - sigev.sigev_notify = SIGEV_THREAD; - sigev.sigev_notify_function = timer_notify; + sigev.sigev_notify = SIGEV_THREAD_ID; sigev.sigev_value.sival_ptr = tp; + sigev._sigev_un._tid = tp->timer_thread_id; + sigev.sigev_signo = SIGALRM; if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) ODP_ABORT("timer_create() returned error %s\n", @@ -688,6 +755,7 @@ static void itimer_init(odp_timer_pool *tp) sec = res / ODP_TIME_SEC_IN_NS; nsec = res - sec * ODP_TIME_SEC_IN_NS; + memset(&ispec, 0, sizeof(ispec)); ispec.it_interval.tv_sec = (time_t)sec; ispec.it_interval.tv_nsec = (long)nsec; ispec.it_value.tv_sec = (time_t)sec;
Switch the timer to use SIGEV_THREAD_ID instead of SIGEV_THREAD, i.e. do not start a timer handler thread on each timer action. Instead, start the timer handler thread manually once and wait for the signal there. This patch also fixes a nasty bug with hanging timer threads, which try to access the timer pool and its freed shm after the pool has been destroyed. Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> --- platform/linux-generic/odp_timer.c | 106 ++++++++++++++++++++++++++++++------- 1 file changed, 87 insertions(+), 19 deletions(-)