@@ -132,9 +132,6 @@ struct odp_buffer_hdr_t {
uint32_t uarea_size; /* size of user area */
uint32_t segcount; /* segment count */
uint32_t segsize; /* segment size */
- /* ipc mapped process can not walk over pointers,
- * offset has to be used */
- uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG];
void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */
uint64_t order; /* sequence for ordered queues */
queue_entry_t *origin_qe; /* ordered queue origin */
@@ -142,6 +139,11 @@ struct odp_buffer_hdr_t {
queue_entry_t *target_qe; /* ordered queue target */
uint64_t sync[ODP_CONFIG_MAX_ORDERED_LOCKS_PER_QUEUE];
};
+#ifdef _ODP_PKTIO_IPC
+ /* an ipc mapped process cannot follow pointers from
+ * another mapping, an offset has to be used instead */
+ uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG];
+#endif
};
/** @internal Compile time assert that the
@@ -436,8 +436,15 @@ static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf,
{
int seg_index = offset / buf->segsize;
int seg_offset = offset % buf->segsize;
+#ifdef _ODP_PKTIO_IPC
void *addr = (char *)buf - buf->ipc_addr_offset[seg_index];
+#else
+ /* buf_hdr.ipc_addr_offset is defined only when the
+ * ipc pktio is enabled */
+ void *addr = NULL;
+ (void)seg_index;
+#endif
if (seglen) {
uint32_t buf_left = limit - offset;
*seglen = seg_offset + buf_left <= buf->segsize ?
@@ -633,9 +640,16 @@ static int ipc_pktio_send(pktio_entry_t *pktio_entry,
* convert it to offset
*/
for (j = 0; j < ODP_BUFFER_MAX_SEG; j++) {
+#ifdef _ODP_PKTIO_IPC
pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr -
(char *)pkt_hdr->buf_hdr.addr[j];
+#else
+ /* buf_hdr.ipc_addr_offset is defined only when the
+ * ipc pktio is enabled */
+ (void)pkt_hdr;
+#endif
}
+
}
/* Put packets to ring to be processed by other process. */
Define ipc_addr_offset member of struct odp_buffer_hdr_t only if ipc pktio
is enabled to reduce struct size.

Signed-off-by: Matias Elo <matias.elo@nokia.com>
---
 platform/linux-generic/include/odp_buffer_internal.h |  8 +++++---
 platform/linux-generic/pktio/ipc.c                   | 14 ++++++++++++++
 2 files changed, 19 insertions(+), 3 deletions(-)
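
Background note (not part of the patch): when a peer process maps the pool's
shared memory, the mapping base address usually differs, so any pointer stored
in the buffer header (such as buf_hdr.addr[]) is only valid in the process
that wrote it. Storing the distance between the header and each segment start,
as ipc_pktio_send() does with
ipc_addr_offset[j] = (char *)pkt_hdr - (char *)pkt_hdr->buf_hdr.addr[j],
lets the receiver rebuild a usable pointer from its own mapping, the way
_ipc_buffer_map() does with addr = (char *)buf - buf->ipc_addr_offset[seg_index].
The sketch below only illustrates that round trip; the names (fake_buf_hdr,
local_raw, remote_raw) are made up and are not part of ODP.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical, cut-down stand-in for odp_buffer_hdr_t: just enough to
 * show why a raw pointer breaks across processes while an offset works. */
struct fake_buf_hdr {
	void    *addr;            /* segment address, valid in this mapping only */
	uint64_t ipc_addr_offset; /* header minus segment start, mapping independent */
};

int main(void)
{
	/* The pool block as the sending process sees it. */
	uint64_t local_raw[32] = {0};
	char *local_map = (char *)local_raw;
	struct fake_buf_hdr *hdr = (struct fake_buf_hdr *)(local_map + 64);

	hdr->addr = local_map;
	/* Same conversion ipc_pktio_send() keeps under _ODP_PKTIO_IPC. */
	hdr->ipc_addr_offset = (uint64_t)((char *)hdr - (char *)hdr->addr);

	/* Pretend the peer process mapped the same block at another base. */
	uint64_t remote_raw[32];
	memcpy(remote_raw, local_raw, sizeof(local_raw));
	char *remote_map = (char *)remote_raw;
	struct fake_buf_hdr *rhdr = (struct fake_buf_hdr *)(remote_map + 64);

	/* rhdr->addr still points into local_map, which means nothing here.
	 * The offset rebuilds the segment address in the new mapping, the
	 * same way _ipc_buffer_map() does. */
	void *seg = (char *)rhdr - rhdr->ipc_addr_offset;

	printf("stale pointer %p, rebuilt segment %p, expected %p\n",
	       rhdr->addr, seg, (void *)remote_map);
	return 0;
}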
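
On the "reduce struct size" point: the saving is ODP_BUFFER_MAX_SEG *
sizeof(uint64_t) per buffer header whenever _ODP_PKTIO_IPC is not defined.
The toy program below just prints that difference for a cut-down header;
MAX_SEG is an arbitrary placeholder, the real ODP_BUFFER_MAX_SEG depends on
the pool configuration.

#include <stdint.h>
#include <stdio.h>

/* Illustrative value only; the real ODP_BUFFER_MAX_SEG may differ. */
#define MAX_SEG 17

/* Trimmed headers, with and without the IPC offset array, to show the
 * per-buffer overhead the #ifdef removes for non-IPC builds. */
struct hdr_with_ipc {
	void    *addr[MAX_SEG];
	uint64_t ipc_addr_offset[MAX_SEG];
};

struct hdr_without_ipc {
	void *addr[MAX_SEG];
};

int main(void)
{
	printf("with ipc_addr_offset:    %zu bytes\n", sizeof(struct hdr_with_ipc));
	printf("without ipc_addr_offset: %zu bytes\n", sizeof(struct hdr_without_ipc));
	printf("saved per buffer header: %zu bytes\n",
	       sizeof(struct hdr_with_ipc) - sizeof(struct hdr_without_ipc));
	return 0;
}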