Message ID | 1465473423-20583-1-git-send-email-matias.elo@nokia.com
---|---
State | New
On Thu, Jun 9, 2016 at 6:57 AM, Matias Elo <matias.elo@nokia.com> wrote:

> ODP_DBG() macro should not be used inside *_print() functions. Since
> debug messages are disabled by default, the print functions won't
> output anything with ODP_DBG.
>
> The patch also removes the extra space previously added to the
> beginning of ODP_PRINT messages.
>
> Signed-off-by: Matias Elo <matias.elo@nokia.com>
>
> [...]
>
> diff --git a/platform/linux-generic/odp_pkt_queue.c b/platform/linux-generic/odp_pkt_queue.c
>
> [...]
>
> +	len += snprintf(&str[len], n - len,
> +			" free_list size=%u min size=%u peak size=%u\n",
> +			pool->free_list_size, pool->min_free_list_size,
> +			pool->peak_free_list_size);
> +	str[len] = '\0';
> +	ODP_PRINT("\n%s\n", str);

I'm OK with changing the ODP_DBG() calls to ODP_PRINT() but I think
that's sufficient. Print output functions are almost by definition not
performance path APIs so just using ODP_PRINT() seems a lot cleaner and
easier to understand/maintain than trying to minimize ODP_PRINT() calls
like this.

> diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
>
> [...]
>
> +	len += snprintf(&str[len], n - len,
> +			" blk low wm count %" PRIu64 "\n", blklowmct);
> +	str[len] = '\0';
> +	ODP_PRINT("\n%s\n", str);

Same comments here, even more so. Trying to do everything in one
ODP_PRINT() call like this looks cumbersome and hard to follow or
maintain. Just change the ODP_DBG() calls to corresponding ODP_PRINT()
calls.

> diff --git a/platform/linux-generic/odp_sorted_list.c b/platform/linux-generic/odp_sorted_list.c
>
> [...]
>
> +	str[len] = '\0';
> +	ODP_PRINT("\n%s\n", str);

Same comment here.

> diff --git a/platform/linux-generic/odp_timer_wheel.c b/platform/linux-generic/odp_timer_wheel.c
>
> [...]
>
> +	ODP_PRINT(" expired_timers_ring size=%u count=%u "
> +		  "peak_count=%u full_cnt=%u\n",
> +		  expired_ring->max_idx + 1, expired_ring->count,
> +		  expired_ring->peak_count,
> +		  expired_ring->expired_ring_full_cnt);

Interesting that in this case you used multiple ODP_PRINT() calls rather
than the sprintf() stuff. I think this looks a lot cleaner and more
straightforward.

> [...]
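For illustration only (this is not part of the posted patch or the review): applied to _odp_pkt_queue_stats_print(), the reviewer's suggestion amounts to a one-to-one ODP_DBG() to ODP_PRINT() substitution, a sketch assuming the ODP-internal types and the ODP_PRINT() macro that odp_pkt_queue.c already has in scope:

```c
void _odp_pkt_queue_stats_print(_odp_int_queue_pool_t queue_pool)
{
	queue_pool_t *pool = (queue_pool_t *)(uintptr_t)queue_pool;

	/* One ODP_PRINT() per line of output; no local buffer or length
	 * bookkeeping needed. */
	ODP_PRINT("pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", queue_pool);
	ODP_PRINT(" max_queue_num=%u max_queued_pkts=%u next_queue_num=%u\n",
		  pool->max_queue_num, pool->max_queued_pkts,
		  pool->next_queue_num);
	ODP_PRINT(" total pkt appends=%" PRIu64 " total pkt removes=%" PRIu64
		  " bad removes=%" PRIu64 "\n",
		  pool->total_pkt_appends, pool->total_pkt_removes,
		  pool->total_bad_removes);
	ODP_PRINT(" free_list size=%u min size=%u peak size=%u\n",
		  pool->free_list_size, pool->min_free_list_size,
		  pool->peak_free_list_size);
}
```

Since the print functions run only when a user asks for a dump, the extra log-function calls cost nothing measurable, which is the reviewer's point.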
diff --git a/platform/linux-generic/include/odp_debug_internal.h b/platform/linux-generic/include/odp_debug_internal.h index 02ae87a..4c44beb 100644 --- a/platform/linux-generic/include/odp_debug_internal.h +++ b/platform/linux-generic/include/odp_debug_internal.h @@ -81,7 +81,7 @@ extern "C" { * specifically for dumping internal data. */ #define ODP_PRINT(fmt, ...) \ - odp_global_data.log_fn(ODP_LOG_PRINT, " " fmt, ##__VA_ARGS__) + odp_global_data.log_fn(ODP_LOG_PRINT, fmt, ##__VA_ARGS__) #ifdef __cplusplus } diff --git a/platform/linux-generic/odp_pkt_queue.c b/platform/linux-generic/odp_pkt_queue.c index 949cf74..f43ceeb 100644 --- a/platform/linux-generic/odp_pkt_queue.c +++ b/platform/linux-generic/odp_pkt_queue.c @@ -346,19 +346,31 @@ int _odp_pkt_queue_remove(_odp_int_queue_pool_t queue_pool, void _odp_pkt_queue_stats_print(_odp_int_queue_pool_t queue_pool) { queue_pool_t *pool; + int max_len = 512; + char str[max_len]; + int len = 0; + int n = max_len - 1; pool = (queue_pool_t *)(uintptr_t)queue_pool; - ODP_DBG("pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", queue_pool); - ODP_DBG(" max_queue_num=%u max_queued_pkts=%u next_queue_num=%u\n", - pool->max_queue_num, pool->max_queued_pkts, - pool->next_queue_num); - ODP_DBG(" total pkt appends=%" PRIu64 " total pkt removes=%" PRIu64 - " bad removes=%" PRIu64 "\n", - pool->total_pkt_appends, pool->total_pkt_removes, - pool->total_bad_removes); - ODP_DBG(" free_list size=%u min size=%u peak size=%u\n", - pool->free_list_size, pool->min_free_list_size, - pool->peak_free_list_size); + + len += snprintf(&str[len], n - len, + "pkt_queue_stats - queue_pool=0x%" PRIX64 "\n", + queue_pool); + len += snprintf(&str[len], n - len, + " max_queue_num=%u max_queued_pkts=%u next_queue_num=%u\n", + pool->max_queue_num, pool->max_queued_pkts, + pool->next_queue_num); + len += snprintf(&str[len], n - len, + " total pkt appends=%" PRIu64 " total pkt removes=" + "%" PRIu64 " bad removes=%" PRIu64 "\n", + pool->total_pkt_appends, pool->total_pkt_removes, + pool->total_bad_removes); + len += snprintf(&str[len], n - len, + " free_list size=%u min size=%u peak size=%u\n", + pool->free_list_size, pool->min_free_list_size, + pool->peak_free_list_size); + str[len] = '\0'; + ODP_PRINT("\n%s\n", str); } void _odp_queue_pool_destroy(_odp_int_queue_pool_t queue_pool) diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c index 5ed7080..c1ffaaa 100644 --- a/platform/linux-generic/odp_pool.c +++ b/platform/linux-generic/odp_pool.c @@ -735,6 +735,10 @@ void odp_pool_print(odp_pool_t pool_hdl) { pool_entry_t *pool; uint32_t pool_id; + int max_len = 1536; + char str[max_len]; + int len = 0; + int n = max_len - 1; pool_id = pool_handle_to_index(pool_hdl); pool = get_pool_entry(pool_id); @@ -756,64 +760,105 @@ void odp_pool_print(odp_pool_t pool_hdl) uint64_t blklowmct = odp_atomic_load_u64(&pool->s.poolstats.blk_low_wm_count); - ODP_DBG("Pool info\n"); - ODP_DBG("---------\n"); - ODP_DBG(" pool %" PRIu64 "\n", + len += snprintf(&str[len], n - len, "Pool info\n"); + len += snprintf(&str[len], n - len, "---------\n"); + len += snprintf(&str[len], n - len, " pool %" PRIu64 "\n", odp_pool_to_u64(pool->s.pool_hdl)); - ODP_DBG(" name %s\n", + len += snprintf(&str[len], n - len, " name %s\n", pool->s.flags.has_name ? pool->s.name : "Unnamed Pool"); - ODP_DBG(" pool type %s\n", + len += snprintf(&str[len], n - len, " pool type %s\n", pool->s.params.type == ODP_POOL_BUFFER ? "buffer" : (pool->s.params.type == ODP_POOL_PACKET ? 
"packet" : (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" : "unknown"))); - ODP_DBG(" pool storage ODP managed shm handle %" PRIu64 "\n", - odp_shm_to_u64(pool->s.pool_shm)); - ODP_DBG(" pool status %s\n", - pool->s.quiesced ? "quiesced" : "active"); - ODP_DBG(" pool opts %s, %s, %s\n", - pool->s.flags.unsegmented ? "unsegmented" : "segmented", - pool->s.flags.zeroized ? "zeroized" : "non-zeroized", - pool->s.flags.predefined ? "predefined" : "created"); - ODP_DBG(" pool base %p\n", pool->s.pool_base_addr); - ODP_DBG(" pool size %zu (%zu pages)\n", + len += snprintf(&str[len], n - len, + " pool storage ODP managed shm handle %" PRIu64 "\n", + odp_shm_to_u64(pool->s.pool_shm)); + len += snprintf(&str[len], n - len, " pool status %s\n", + pool->s.quiesced ? "quiesced" : "active"); + len += snprintf(&str[len], n - len, " pool opts %s, %s, %s\n", + pool->s.flags.unsegmented ? "unsegmented" : "segmented", + pool->s.flags.zeroized ? "zeroized" : "non-zeroized", + pool->s.flags.predefined ? "predefined" : "created"); + len += snprintf(&str[len], n - len, " pool base %p\n", + pool->s.pool_base_addr); + len += snprintf(&str[len], n - len, + " pool size %zu (%zu pages)\n", pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE); - ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr); - ODP_DBG(" udata size %zu\n", pool->s.udata_size); - ODP_DBG(" headroom %u\n", pool->s.headroom); - ODP_DBG(" tailroom %u\n", pool->s.tailroom); + len += snprintf(&str[len], n - len, " pool mdata base %p\n", + pool->s.pool_mdata_addr); + len += snprintf(&str[len], n - len, " udata size %" PRIu32 "\n", + pool->s.udata_size); + len += snprintf(&str[len], n - len, " headroom %" PRIu32 "\n", + pool->s.headroom); + len += snprintf(&str[len], n - len, " tailroom %" PRIu32 "\n", + pool->s.tailroom); if (pool->s.params.type == ODP_POOL_BUFFER) { - ODP_DBG(" buf size %zu\n", pool->s.params.buf.size); - ODP_DBG(" buf align %u requested, %u used\n", - pool->s.params.buf.align, pool->s.buf_align); + len += snprintf(&str[len], n - len, + " buf size %" PRIu32 "\n", + pool->s.params.buf.size); + len += snprintf(&str[len], n - len, + " buf align %" PRIu32 " requested, " + "%" PRIu32 " used\n", pool->s.params.buf.align, + pool->s.buf_align); } else if (pool->s.params.type == ODP_POOL_PACKET) { - ODP_DBG(" seg length %u requested, %u used\n", - pool->s.params.pkt.seg_len, pool->s.seg_size); - ODP_DBG(" pkt length %u requested, %u used\n", - pool->s.params.pkt.len, pool->s.blk_size); + len += snprintf(&str[len], n - len, + " seg length %" PRIu32 " requested, " + "%" PRIu32 " used\n", + pool->s.params.pkt.seg_len, pool->s.seg_size); + len += snprintf(&str[len], n - len, + " pkt length %" PRIu32 " requested, " + "%" PRIu32 " used\n", pool->s.params.pkt.len, + pool->s.blk_size); } - ODP_DBG(" num bufs %u\n", pool->s.buf_num); - ODP_DBG(" bufs available %u %s\n", bufcount, - pool->s.buf_low_wm_assert ? " **buf low wm asserted**" : ""); - ODP_DBG(" bufs in use %u\n", pool->s.buf_num - bufcount); - ODP_DBG(" buf allocs %lu\n", bufallocs); - ODP_DBG(" buf frees %lu\n", buffrees); - ODP_DBG(" buf empty %lu\n", bufempty); - ODP_DBG(" blk size %zu\n", + len += snprintf(&str[len], n - len, + " num bufs %" PRIu32 "\n", pool->s.buf_num); + len += snprintf(&str[len], n - len, " bufs available %" PRIu32 " %s\n", + bufcount, pool->s.buf_low_wm_assert ? 
+ " **buf low wm asserted**" : ""); + len += snprintf(&str[len], n - len, + " bufs in use %" PRIu32 "\n", + pool->s.buf_num - bufcount); + len += snprintf(&str[len], n - len, + " buf allocs %" PRIu64 "\n", bufallocs); + len += snprintf(&str[len], n - len, + " buf frees %" PRIu64 "\n", buffrees); + len += snprintf(&str[len], n - len, + " buf empty %" PRIu64 "\n", bufempty); + len += snprintf(&str[len], n - len, + " blk size %" PRIu32 "\n", pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0); - ODP_DBG(" blks available %u %s\n", blkcount, - pool->s.blk_low_wm_assert ? " **blk low wm asserted**" : ""); - ODP_DBG(" blk allocs %lu\n", blkallocs); - ODP_DBG(" blk frees %lu\n", blkfrees); - ODP_DBG(" blk empty %lu\n", blkempty); - ODP_DBG(" buf high wm value %lu\n", pool->s.buf_high_wm); - ODP_DBG(" buf high wm count %lu\n", bufhiwmct); - ODP_DBG(" buf low wm value %lu\n", pool->s.buf_low_wm); - ODP_DBG(" buf low wm count %lu\n", buflowmct); - ODP_DBG(" blk high wm value %lu\n", pool->s.blk_high_wm); - ODP_DBG(" blk high wm count %lu\n", blkhiwmct); - ODP_DBG(" blk low wm value %lu\n", pool->s.blk_low_wm); - ODP_DBG(" blk low wm count %lu\n", blklowmct); + len += snprintf(&str[len], n - len, " blks available %" PRIu32 " %s\n", + blkcount, pool->s.blk_low_wm_assert ? + " **blk low wm asserted**" : ""); + len += snprintf(&str[len], n - len, + " blk allocs %" PRIu64 "\n", blkallocs); + len += snprintf(&str[len], n - len, + " blk frees %" PRIu64 "\n", blkfrees); + len += snprintf(&str[len], n - len, + " blk empty %" PRIu64 "\n", blkempty); + len += snprintf(&str[len], n - len, + " buf high wm value %" PRIu32 "\n", + pool->s.buf_high_wm); + len += snprintf(&str[len], n - len, + " buf high wm count %" PRIu64 "\n", bufhiwmct); + len += snprintf(&str[len], n - len, + " buf low wm value %" PRIu32 "\n", + pool->s.buf_low_wm); + len += snprintf(&str[len], n - len, + " buf low wm count %" PRIu64 "\n", buflowmct); + len += snprintf(&str[len], n - len, + " blk high wm value %" PRIu32 "\n", + pool->s.blk_high_wm); + len += snprintf(&str[len], n - len, + " blk high wm count %" PRIu64 "\n", blkhiwmct); + len += snprintf(&str[len], n - len, + " blk low wm value %" PRIu32 "\n", + pool->s.blk_low_wm); + len += snprintf(&str[len], n - len, + " blk low wm count %" PRIu64 "\n", blklowmct); + str[len] = '\0'; + ODP_PRINT("\n%s\n", str); } diff --git a/platform/linux-generic/odp_sorted_list.c b/platform/linux-generic/odp_sorted_list.c index 554494b..0dc08a8 100644 --- a/platform/linux-generic/odp_sorted_list.c +++ b/platform/linux-generic/odp_sorted_list.c @@ -256,14 +256,23 @@ int _odp_sorted_list_destroy(_odp_int_sorted_pool_t sorted_pool, void _odp_sorted_list_stats_print(_odp_int_sorted_pool_t sorted_pool) { sorted_pool_t *pool; + int max_len = 512; + char str[max_len]; + int len = 0; + int n = max_len - 1; pool = (sorted_pool_t *)(uintptr_t)sorted_pool; - ODP_DBG("sorted_pool=0x%" PRIX64 "\n", sorted_pool); - ODP_DBG(" max_sorted_lists=%u next_list_idx=%u\n", - pool->max_sorted_lists, pool->next_list_idx); - ODP_DBG(" total_inserts=%" PRIu64 " total_deletes=%" PRIu64 - " total_removes=%" PRIu64 "\n", pool->total_inserts, - pool->total_deletes, pool->total_removes); + len += snprintf(&str[len], n - len, + "sorted_pool=0x%" PRIX64 "\n", sorted_pool); + len += snprintf(&str[len], n - len, + " max_sorted_lists=%u next_list_idx=%u\n", + pool->max_sorted_lists, pool->next_list_idx); + len += snprintf(&str[len], n - len, + " total_inserts=%" PRIu64 " total_deletes=%" PRIu64 + " total_removes=%" PRIu64 "\n", 
pool->total_inserts, + pool->total_deletes, pool->total_removes); + str[len] = '\0'; + ODP_PRINT("\n%s\n", str); } void _odp_sorted_pool_destroy(_odp_int_sorted_pool_t sorted_pool) diff --git a/platform/linux-generic/odp_timer_wheel.c b/platform/linux-generic/odp_timer_wheel.c index 865dd7e..4b1986f 100644 --- a/platform/linux-generic/odp_timer_wheel.c +++ b/platform/linux-generic/odp_timer_wheel.c @@ -934,10 +934,10 @@ uint32_t _odp_timer_wheel_count(_odp_timer_wheel_t timer_wheel) static void _odp_int_timer_wheel_desc_print(wheel_desc_t *wheel_desc, uint32_t wheel_idx) { - ODP_DBG(" wheel=%u num_slots=%u ticks_shift=%u ticks_per_slot=%u" - " ticks_per_rev=%" PRIu64 "\n", - wheel_idx, wheel_desc->num_slots, wheel_desc->ticks_shift, - wheel_desc->ticks_per_slot, wheel_desc->ticks_per_rev); + ODP_PRINT(" wheel=%u num_slots=%u ticks_shift=%u ticks_per_slot=%u" + " ticks_per_rev=%" PRIu64 "\n", + wheel_idx, wheel_desc->num_slots, wheel_desc->ticks_shift, + wheel_desc->ticks_per_slot, wheel_desc->ticks_per_rev); } void _odp_timer_wheel_stats_print(_odp_timer_wheel_t timer_wheel) @@ -949,28 +949,30 @@ void _odp_timer_wheel_stats_print(_odp_timer_wheel_t timer_wheel) timer_wheels = (timer_wheels_t *)(uintptr_t)timer_wheel; expired_ring = timer_wheels->expired_timers_ring; - ODP_DBG("_odp_int_timer_wheel_stats current_ticks=%" PRIu64 "\n", - timer_wheels->current_ticks); + ODP_PRINT("_odp_int_timer_wheel_stats current_ticks=%" PRIu64 "\n", + timer_wheels->current_ticks); for (wheel_idx = 0; wheel_idx < 4; wheel_idx++) _odp_int_timer_wheel_desc_print( &timer_wheels->wheel_descs[wheel_idx], wheel_idx); - ODP_DBG(" total timer_inserts=%" PRIu64 " timer_removes=%" PRIu64 - " insert_fails=%" PRIu64 "\n", - timer_wheels->total_timer_inserts, - timer_wheels->total_timer_removes, - timer_wheels->insert_fail_cnt); - ODP_DBG(" total_promote_cnt=%" PRIu64 " promote_fail_cnt=%" - PRIu64 "\n", timer_wheels->total_promote_cnt, - timer_wheels->promote_fail_cnt); - ODP_DBG(" free_list_size=%u min_size=%u peak_size=%u\n", - timer_wheels->free_list_size, timer_wheels->min_free_list_size, - timer_wheels->peak_free_list_size); - ODP_DBG(" expired_timers_ring size=%u count=%u " - "peak_count=%u full_cnt=%u\n", - expired_ring->max_idx + 1, expired_ring->count, - expired_ring->peak_count, expired_ring->expired_ring_full_cnt); + ODP_PRINT(" total timer_inserts=%" PRIu64 " timer_removes=%" PRIu64 + " insert_fails=%" PRIu64 "\n", + timer_wheels->total_timer_inserts, + timer_wheels->total_timer_removes, + timer_wheels->insert_fail_cnt); + ODP_PRINT(" total_promote_cnt=%" PRIu64 " promote_fail_cnt=%" + PRIu64 "\n", timer_wheels->total_promote_cnt, + timer_wheels->promote_fail_cnt); + ODP_PRINT(" free_list_size=%u min_size=%u peak_size=%u\n", + timer_wheels->free_list_size, + timer_wheels->min_free_list_size, + timer_wheels->peak_free_list_size); + ODP_PRINT(" expired_timers_ring size=%u count=%u " + "peak_count=%u full_cnt=%u\n", + expired_ring->max_idx + 1, expired_ring->count, + expired_ring->peak_count, + expired_ring->expired_ring_full_cnt); } void _odp_timer_wheel_destroy(_odp_timer_wheel_t timer_wheel) diff --git a/platform/linux-generic/odp_traffic_mngr.c b/platform/linux-generic/odp_traffic_mngr.c index 1fa2d27..3dad3ad 100644 --- a/platform/linux-generic/odp_traffic_mngr.c +++ b/platform/linux-generic/odp_traffic_mngr.c @@ -4406,19 +4406,19 @@ void odp_tm_stats_print(odp_tm_t odp_tm) tm_system = GET_TM_SYSTEM(odp_tm); input_work_queue = tm_system->input_work_queue; - ODP_DBG("odp_tm_stats_print - tm_system=0x%" 
PRIX64 " tm_idx=%u\n", - odp_tm, tm_system->tm_idx); - ODP_DBG(" input_work_queue size=%u current cnt=%u peak cnt=%u\n", - INPUT_WORK_RING_SIZE, input_work_queue->queue_cnt, - input_work_queue->peak_cnt); - ODP_DBG(" input_work_queue enqueues=%" PRIu64 " dequeues=% " PRIu64 - " fail_cnt=%" PRIu64 "\n", input_work_queue->total_enqueues, - input_work_queue->total_dequeues, - input_work_queue->enqueue_fail_cnt); - ODP_DBG(" green_cnt=%" PRIu64 " yellow_cnt=%" PRIu64 " red_cnt=%" - PRIu64 "\n", tm_system->shaper_green_cnt, - tm_system->shaper_yellow_cnt, - tm_system->shaper_red_cnt); + ODP_PRINT("odp_tm_stats_print - tm_system=0x%" PRIX64 " tm_idx=%u\n", + odp_tm, tm_system->tm_idx); + ODP_PRINT(" input_work_queue size=%u current cnt=%u peak cnt=%u\n", + INPUT_WORK_RING_SIZE, input_work_queue->queue_cnt, + input_work_queue->peak_cnt); + ODP_PRINT(" input_work_queue enqueues=%" PRIu64 " dequeues=% " PRIu64 + " fail_cnt=%" PRIu64 "\n", input_work_queue->total_enqueues, + input_work_queue->total_dequeues, + input_work_queue->enqueue_fail_cnt); + ODP_PRINT(" green_cnt=%" PRIu64 " yellow_cnt=%" PRIu64 " red_cnt=%" + PRIu64 "\n", tm_system->shaper_green_cnt, + tm_system->shaper_yellow_cnt, + tm_system->shaper_red_cnt); _odp_pkt_queue_stats_print(tm_system->_odp_int_queue_pool); _odp_timer_wheel_stats_print(tm_system->_odp_int_timer_wheel); @@ -4428,14 +4428,13 @@ void odp_tm_stats_print(odp_tm_t odp_tm) for (queue_num = 1; queue_num < max_queue_num; queue_num++) { tm_queue_obj = tm_system->queue_num_tbl[queue_num]; if (tm_queue_obj && tm_queue_obj->pkts_rcvd_cnt != 0) - ODP_DBG("queue_num=%u priority=%u rcvd=%u enqueued=%u " - "dequeued=%u consumed=%u\n", - queue_num, - tm_queue_obj->priority, - tm_queue_obj->pkts_rcvd_cnt, - tm_queue_obj->pkts_enqueued_cnt, - tm_queue_obj->pkts_dequeued_cnt, - tm_queue_obj->pkts_consumed_cnt); + ODP_PRINT("queue_num=%u priority=%u rcvd=%u " + "enqueued=%u dequeued=%u consumed=%u\n", + queue_num, tm_queue_obj->priority, + tm_queue_obj->pkts_rcvd_cnt, + tm_queue_obj->pkts_enqueued_cnt, + tm_queue_obj->pkts_dequeued_cnt, + tm_queue_obj->pkts_consumed_cnt); } }
ODP_DBG() macro should not be used inside *_print() functions. Since
debug messages are disabled by default, the print functions won't
output anything with ODP_DBG.

The patch also removes the extra space previously added to the
beginning of ODP_PRINT messages.

Signed-off-by: Matias Elo <matias.elo@nokia.com>
---
V2:
- Don't modify secondary_hash_dump()
- Remove unused tm debug print functions

 .../linux-generic/include/odp_debug_internal.h |   2 +-
 platform/linux-generic/odp_pkt_queue.c         |  34 +++--
 platform/linux-generic/odp_pool.c              | 139 ++++++++++++++-------
 platform/linux-generic/odp_sorted_list.c       |  21 +++-
 platform/linux-generic/odp_timer_wheel.c       |  44 +++----
 platform/linux-generic/odp_traffic_mngr.c      |  41 +++---
 6 files changed, 174 insertions(+), 107 deletions(-)
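The behavioral difference the commit message relies on can be sketched with a pair of stand-in macros. The names, the DEBUG_ENABLED switch, and the fprintf/printf back ends below are invented for illustration; only ODP_PRINT's real definition appears in the diff above. The point is that a debug macro whose body is short-circuited in a default build makes a *_print() helper silent, while an unconditional print macro always emits output.

```c
#include <stdio.h>

/* Illustrative stand-ins -- not the real odp_debug_internal.h macros. */
#ifndef DEBUG_ENABLED
#define DEBUG_ENABLED 0 /* plays the role of the build-time debug switch */
#endif

#define MY_DBG(fmt, ...) \
	do { \
		if (DEBUG_ENABLED) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

#define MY_PRINT(fmt, ...) \
	printf(fmt, ##__VA_ARGS__)

static void stats_print(unsigned int pkts)
{
	MY_DBG("pkts=%u\n", pkts);   /* silent in a default (non-debug) build */
	MY_PRINT("pkts=%u\n", pkts); /* always printed, as a dump function needs */
}

int main(void)
{
	stats_print(42);
	return 0;
}
```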