[3/4] hash: remove memory orderings from rw-lock lookup fns

Message ID 20181109163917.16845-4-honnappa.nagarahalli@arm.com
State New
Series
  • hash: separate lf and rw lock lookup code paths

Commit Message

Honnappa Nagarahalli Nov. 9, 2018, 4:39 p.m.
Remove the memory orderings from lookup functions using
rw-lock.
This is an intermediate commit meant to ease the
review process.

Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
Cc: honnappa.nagarahalli@arm.com

Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>

Reviewed-by: Gavin Hu <gavin.hu@arm.com>

---
 lib/librte_hash/rte_cuckoo_hash.c | 277 +++++++++++-------------------
 1 file changed, 105 insertions(+), 172 deletions(-)

-- 
2.17.1
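
For context, the pattern this patch applies can be shown with a minimal
standalone sketch (plain C with GCC atomic builtins; the toy_* names are
illustrative, not part of DPDK): a load that needs __ATOMIC_ACQUIRE in the
lock-free lookup can become a plain load once a reader/writer lock
serializes readers against writers, because the lock's own acquire/release
semantics provide the required ordering.

#include <pthread.h>
#include <stdint.h>

#define TOY_BUCKET_ENTRIES 4

struct toy_bucket {
	uint32_t key_idx[TOY_BUCKET_ENTRIES];
	pthread_rwlock_t lock;
};

/* Lock-free reader: the acquire load pairs with the writer's release
 * store, so the key index is read consistently without a lock.
 */
static uint32_t
toy_read_lockfree(const struct toy_bucket *b, int i)
{
	return __atomic_load_n(&b->key_idx[i], __ATOMIC_ACQUIRE);
}

/* rw-lock reader: lock acquisition/release already orders the accesses
 * against writers, so a plain load suffices.
 */
static uint32_t
toy_read_locked(struct toy_bucket *b, int i)
{
	uint32_t idx;

	pthread_rwlock_rdlock(&b->lock);
	idx = b->key_idx[i];
	pthread_rwlock_unlock(&b->lock);
	return idx;
}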

Comments

Jerin Jacob Nov. 10, 2018, 8:51 a.m. | #1
-----Original Message-----
> Date: Fri, 9 Nov 2018 10:39:16 -0600
> From: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> To: bruce.richardson@intel.com, pablo.de.lara.guarch@intel.com
> CC: dev@dpdk.org, jerin.jacob@caviumnetworks.com, hemant.agrawal@nxp.com,
>  chaozhu@linux.vnet.ibm.com, yipeng1.wang@intel.com,
>  dharmik.thakkar@arm.com, gavin.hu@arm.com, honnappa.nagarahalli@arm.com,
>  nd@arm.com
> Subject: [PATCH 3/4] hash: remove memory orderings from rw-lock lookup fns
> X-Mailer: git-send-email 2.17.1
>
> Remove the memory orderings from lookup functions using
> rw-lock.
> This is an intermediate commit meant to ease the
> review process.
>
> Fixes: e605a1d36 ("hash: add lock-free r/w concurrency")
> Cc: honnappa.nagarahalli@arm.com
>
> Suggested-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>
> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
> Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
> Reviewed-by: Gavin Hu <gavin.hu@arm.com>
> ---
>  lib/librte_hash/rte_cuckoo_hash.c | 277 +++++++++++-------------------
>  1 file changed, 105 insertions(+), 172 deletions(-)
>
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
> index e6b84c6bc..9390dc5e4 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1135,27 +1135,22 @@ search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
>                         void **data, const struct rte_hash_bucket *bkt)
>  {
>         int i;
> -       uint32_t key_idx;
> -       void *pdata;
>         struct rte_hash_key *k, *keys = h->key_store;
>
>         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
> -               key_idx = __atomic_load_n(&bkt->key_idx[i],
> -                                         __ATOMIC_ACQUIRE);
> -               if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
> +               if (bkt->sig_current[i] == sig &&
> +                               bkt->key_idx[i] != EMPTY_SLOT) {
>                         k = (struct rte_hash_key *) ((char *)keys +
> -                                       key_idx * h->key_entry_size);
> -                       pdata = __atomic_load_n(&k->pdata,
> -                                       __ATOMIC_ACQUIRE);
> +                                       bkt->key_idx[i] * h->key_entry_size);
>
>                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
>                                 if (data != NULL)
> -                                       *data = pdata;
> +                                       *data = k->pdata;
>                                 /*
>                                  * Return index where key is stored,
>                                  * subtracting the first dummy index
>                                  */
> -                               return key_idx - 1;
> +                               return bkt->key_idx[i] - 1;
>                         }
>                 }
>         }
> @@ -1201,7 +1196,6 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
>  {
>         uint32_t prim_bucket_idx, sec_bucket_idx;
>         struct rte_hash_bucket *bkt, *cur_bkt;
> -       uint32_t cnt_b, cnt_a;
>         int ret;
>         uint16_t short_sig;
>
> @@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
>
>         __hash_rw_reader_lock(h);
>
> -       do {
> -               /* Load the table change counter before the lookup
> -                * starts. Acquire semantics will make sure that
> -                * loads in search_one_bucket are not hoisted.
> -                */
> -               cnt_b = __atomic_load_n(h->tbl_chng_cnt,
> -                               __ATOMIC_ACQUIRE);
> +       /* Check if key is in primary location */
> +       bkt = &h->buckets[prim_bucket_idx];

In the original version, this bkt assignment is before __hash_rw_reader_lock().
This is causing a performance issue in the lookup 'hit' case.

The following change fixes it, i.e. brings it back to the original version.

[master]83xx1.2[dpdk]# git diff
diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 7e1a9ac96..bc8a55f0f 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1204,10 +1204,11 @@ __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
        prim_bucket_idx = get_prim_bucket_index(h, sig);
        sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
 
-       __hash_rw_reader_lock(h);
-
        /* Check if key is in primary location */
        bkt = &h->buckets[prim_bucket_idx];
+
+       __hash_rw_reader_lock(h);
+
        ret = search_one_bucket_l(h, key, short_sig, data, bkt);
        if (ret != -1) {
                __hash_rw_reader_unlock(h);


Could you send the final version that needs to be taken into the tree,
i.e. remove the intermediate commits that are only for review purposes?
I can then test with that.
Honnappa Nagarahalli Nov. 10, 2018, 6:58 p.m. | #2
> > @@ -1211,49 +1205,25 @@ __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
> >
> >         __hash_rw_reader_lock(h);
> >
> > -       do {
> > -               /* Load the table change counter before the lookup
> > -                * starts. Acquire semantics will make sure that
> > -                * loads in search_one_bucket are not hoisted.
> > -                */
> > -               cnt_b = __atomic_load_n(h->tbl_chng_cnt,
> > -                               __ATOMIC_ACQUIRE);
> > +       /* Check if key is in primary location */
> > +       bkt = &h->buckets[prim_bucket_idx];
>
> In the original version, this bkt assignment is before __hash_rw_reader_lock().
> This is causing a performance issue in the lookup 'hit' case.
>
> The following change fixes it, i.e. brings it back to the original version.
>
> [master]83xx1.2[dpdk]# git diff
> diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
> index 7e1a9ac96..bc8a55f0f 100644
> --- a/lib/librte_hash/rte_cuckoo_hash.c
> +++ b/lib/librte_hash/rte_cuckoo_hash.c
> @@ -1204,10 +1204,11 @@ __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
>         prim_bucket_idx = get_prim_bucket_index(h, sig);
>         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
>
> -       __hash_rw_reader_lock(h);
> -
>         /* Check if key is in primary location */
>         bkt = &h->buckets[prim_bucket_idx];
> +
> +       __hash_rw_reader_lock(h);
> +
>         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
>         if (ret != -1) {
>                 __hash_rw_reader_unlock(h);
>
> Could you send the final version that needs to be taken into the tree,
> i.e. remove the intermediate commits that are only for review purposes?
> I can then test with that.

Thanks Jerin for testing. I have sent out v2.
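
As background on the do/while construct deleted throughout this patch, the
following standalone sketch (illustrative toy_* names, not the DPDK API)
shows the counter-based retry scheme the lock-free path uses: snapshot the
table change counter, search, and retry if a concurrent writer bumped the
counter. With the rw-lock held across the search no writer can interleave,
which is why the loop and its acquire fences can be dropped.

#include <stdbool.h>
#include <stdint.h>

/* Placeholder for the real bucket search; returns true on a hit. */
extern bool toy_search_buckets(const void *key, void **data);

static bool
toy_lookup_with_retry(const uint32_t *tbl_chng_cnt, const void *key,
		      void **data)
{
	uint32_t cnt_b, cnt_a;
	bool found;

	do {
		/* Snapshot the change counter before the search starts;
		 * acquire semantics keep the bucket loads from being
		 * hoisted above this load.
		 */
		cnt_b = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);

		found = toy_search_buckets(key, data);

		/* Keep the search loads above the counter re-read. */
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		cnt_a = __atomic_load_n(tbl_chng_cnt, __ATOMIC_ACQUIRE);
	} while (cnt_b != cnt_a); /* a writer interfered; search again */

	return found;
}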

Patch

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index e6b84c6bc..9390dc5e4 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -1135,27 +1135,22 @@  search_one_bucket(const struct rte_hash *h, const void *key, uint16_t sig,
 			void **data, const struct rte_hash_bucket *bkt)
 {
 	int i;
-	uint32_t key_idx;
-	void *pdata;
 	struct rte_hash_key *k, *keys = h->key_store;
 
 	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
-		key_idx = __atomic_load_n(&bkt->key_idx[i],
-					  __ATOMIC_ACQUIRE);
-		if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
+		if (bkt->sig_current[i] == sig &&
+				bkt->key_idx[i] != EMPTY_SLOT) {
 			k = (struct rte_hash_key *) ((char *)keys +
-					key_idx * h->key_entry_size);
-			pdata = __atomic_load_n(&k->pdata,
-					__ATOMIC_ACQUIRE);
+					bkt->key_idx[i] * h->key_entry_size);
 
 			if (rte_hash_cmp_eq(key, k->key, h) == 0) {
 				if (data != NULL)
-					*data = pdata;
+					*data = k->pdata;
 				/*
 				 * Return index where key is stored,
 				 * subtracting the first dummy index
 				 */
-				return key_idx - 1;
+				return bkt->key_idx[i] - 1;
 			}
 		}
 	}
@@ -1201,7 +1196,6 @@  __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
 {
 	uint32_t prim_bucket_idx, sec_bucket_idx;
 	struct rte_hash_bucket *bkt, *cur_bkt;
-	uint32_t cnt_b, cnt_a;
 	int ret;
 	uint16_t short_sig;
 
@@ -1211,49 +1205,25 @@  __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
 
 	__hash_rw_reader_lock(h);
 
-	do {
-		/* Load the table change counter before the lookup
-		 * starts. Acquire semantics will make sure that
-		 * loads in search_one_bucket are not hoisted.
-		 */
-		cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-				__ATOMIC_ACQUIRE);
+	/* Check if key is in primary location */
+	bkt = &h->buckets[prim_bucket_idx];
+	ret = search_one_bucket(h, key, short_sig, data, bkt);
+	if (ret != -1) {
+		__hash_rw_reader_unlock(h);
+		return ret;
+	}
+	/* Calculate secondary hash */
+	bkt = &h->buckets[sec_bucket_idx];
 
-		/* Check if key is in primary location */
-		bkt = &h->buckets[prim_bucket_idx];
-		ret = search_one_bucket(h, key, short_sig, data, bkt);
+	/* Check if key is in secondary location */
+	FOR_EACH_BUCKET(cur_bkt, bkt) {
+		ret = search_one_bucket(h, key, short_sig,
+					data, cur_bkt);
 		if (ret != -1) {
 			__hash_rw_reader_unlock(h);
 			return ret;
 		}
-		/* Calculate secondary hash */
-		bkt = &h->buckets[sec_bucket_idx];
-
-		/* Check if key is in secondary location */
-		FOR_EACH_BUCKET(cur_bkt, bkt) {
-			ret = search_one_bucket(h, key, short_sig,
-						data, cur_bkt);
-			if (ret != -1) {
-				__hash_rw_reader_unlock(h);
-				return ret;
-			}
-		}
-
-		/* The loads of sig_current in search_one_bucket
-		 * should not move below the load from tbl_chng_cnt.
-		 */
-		__atomic_thread_fence(__ATOMIC_ACQUIRE);
-		/* Re-read the table change counter to check if the
-		 * table has changed during search. If yes, re-do
-		 * the search.
-		 * This load should not get hoisted. The load
-		 * acquires on cnt_b, key index in primary bucket
-		 * and key index in secondary bucket will make sure
-		 * that it does not get hoisted.
-		 */
-		cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
-	} while (cnt_b != cnt_a);
+	}
 
 	__hash_rw_reader_unlock(h);
 
@@ -1638,8 +1608,6 @@  __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
 	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
 	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
 	struct rte_hash_bucket *cur_bkt, *next_bkt;
-	void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
-	uint32_t cnt_b, cnt_a;
 
 	/* Prefetch first keys */
 	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
@@ -1681,138 +1649,103 @@  __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
 	}
 
 	__hash_rw_reader_lock(h);
-	do {
-		/* Load the table change counter before the lookup
-		 * starts. Acquire semantics will make sure that
-		 * loads in compare_signatures are not hoisted.
-		 */
-		cnt_b = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
-
-		/* Compare signatures and prefetch key slot of first hit */
-		for (i = 0; i < num_keys; i++) {
-			compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
-				primary_bkt[i], secondary_bkt[i],
-				sig[i], h->sig_cmp_fn);
-
-			if (prim_hitmask[i]) {
-				uint32_t first_hit =
-						__builtin_ctzl(prim_hitmask[i])
-						>> 1;
-				uint32_t key_idx =
-					primary_bkt[i]->key_idx[first_hit];
-				const struct rte_hash_key *key_slot =
-					(const struct rte_hash_key *)(
-					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
-				rte_prefetch0(key_slot);
-				continue;
-			}
 
-			if (sec_hitmask[i]) {
-				uint32_t first_hit =
-						__builtin_ctzl(sec_hitmask[i])
-						>> 1;
-				uint32_t key_idx =
-					secondary_bkt[i]->key_idx[first_hit];
-				const struct rte_hash_key *key_slot =
-					(const struct rte_hash_key *)(
-					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
-				rte_prefetch0(key_slot);
-			}
+	/* Compare signatures and prefetch key slot of first hit */
+	for (i = 0; i < num_keys; i++) {
+		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
+			primary_bkt[i], secondary_bkt[i],
+			sig[i], h->sig_cmp_fn);
+
+		if (prim_hitmask[i]) {
+			uint32_t first_hit =
+					__builtin_ctzl(prim_hitmask[i])
+					>> 1;
+			uint32_t key_idx =
+				primary_bkt[i]->key_idx[first_hit];
+			const struct rte_hash_key *key_slot =
+				(const struct rte_hash_key *)(
+				(const char *)h->key_store +
+				key_idx * h->key_entry_size);
+			rte_prefetch0(key_slot);
+			continue;
 		}
 
-		/* Compare keys, first hits in primary first */
-		for (i = 0; i < num_keys; i++) {
-			positions[i] = -ENOENT;
-			while (prim_hitmask[i]) {
-				uint32_t hit_index =
-						__builtin_ctzl(prim_hitmask[i])
-						>> 1;
-				uint32_t key_idx =
-				__atomic_load_n(
-					&primary_bkt[i]->key_idx[hit_index],
-					__ATOMIC_ACQUIRE);
-				const struct rte_hash_key *key_slot =
-					(const struct rte_hash_key *)(
-					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
+		if (sec_hitmask[i]) {
+			uint32_t first_hit =
+					__builtin_ctzl(sec_hitmask[i])
+					>> 1;
+			uint32_t key_idx =
+				secondary_bkt[i]->key_idx[first_hit];
+			const struct rte_hash_key *key_slot =
+				(const struct rte_hash_key *)(
+				(const char *)h->key_store +
+				key_idx * h->key_entry_size);
+			rte_prefetch0(key_slot);
+		}
+	}
 
-				if (key_idx != EMPTY_SLOT)
-					pdata[i] = __atomic_load_n(
-							&key_slot->pdata,
-							__ATOMIC_ACQUIRE);
-				/*
-				 * If key index is 0, do not compare key,
-				 * as it is checking the dummy slot
-				 */
-				if (!!key_idx &
-					!rte_hash_cmp_eq(
-						key_slot->key, keys[i], h)) {
-					if (data != NULL)
-						data[i] = pdata[i];
+	/* Compare keys, first hits in primary first */
+	for (i = 0; i < num_keys; i++) {
+		positions[i] = -ENOENT;
+		while (prim_hitmask[i]) {
+			uint32_t hit_index =
+					__builtin_ctzl(prim_hitmask[i])
+					>> 1;
+			uint32_t key_idx =
+				primary_bkt[i]->key_idx[hit_index];
+			const struct rte_hash_key *key_slot =
+				(const struct rte_hash_key *)(
+				(const char *)h->key_store +
+				key_idx * h->key_entry_size);
+
+			/*
+			 * If key index is 0, do not compare key,
+			 * as it is checking the dummy slot
+			 */
+			if (!!key_idx &
+				!rte_hash_cmp_eq(
+					key_slot->key, keys[i], h)) {
+				if (data != NULL)
+					data[i] = key_slot->pdata;
 
-					hits |= 1ULL << i;
-					positions[i] = key_idx - 1;
-					goto next_key;
-				}
-				prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+				hits |= 1ULL << i;
+				positions[i] = key_idx - 1;
+				goto next_key;
 			}
+			prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
+		}
 
-			while (sec_hitmask[i]) {
-				uint32_t hit_index =
-						__builtin_ctzl(sec_hitmask[i])
-						>> 1;
-				uint32_t key_idx =
-				__atomic_load_n(
-					&secondary_bkt[i]->key_idx[hit_index],
-					__ATOMIC_ACQUIRE);
-				const struct rte_hash_key *key_slot =
-					(const struct rte_hash_key *)(
-					(const char *)h->key_store +
-					key_idx * h->key_entry_size);
-
-				if (key_idx != EMPTY_SLOT)
-					pdata[i] = __atomic_load_n(
-							&key_slot->pdata,
-							__ATOMIC_ACQUIRE);
-				/*
-				 * If key index is 0, do not compare key,
-				 * as it is checking the dummy slot
-				 */
+		while (sec_hitmask[i]) {
+			uint32_t hit_index =
+					__builtin_ctzl(sec_hitmask[i])
+					>> 1;
+			uint32_t key_idx =
+				secondary_bkt[i]->key_idx[hit_index];
+			const struct rte_hash_key *key_slot =
+				(const struct rte_hash_key *)(
+				(const char *)h->key_store +
+				key_idx * h->key_entry_size);
+
+			/*
+			 * If key index is 0, do not compare key,
+			 * as it is checking the dummy slot
+			 */
 
-				if (!!key_idx &
-					!rte_hash_cmp_eq(
-						key_slot->key, keys[i], h)) {
-					if (data != NULL)
-						data[i] = pdata[i];
+			if (!!key_idx &
+				!rte_hash_cmp_eq(
+					key_slot->key, keys[i], h)) {
+				if (data != NULL)
+					data[i] = key_slot->pdata;
 
-					hits |= 1ULL << i;
-					positions[i] = key_idx - 1;
-					goto next_key;
-				}
-				sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
+				hits |= 1ULL << i;
+				positions[i] = key_idx - 1;
+				goto next_key;
 			}
-next_key:
-			continue;
+			sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
 		}
-
-		/* The loads of sig_current in compare_signatures
-		 * should not move below the load from tbl_chng_cnt.
-		 */
-		__atomic_thread_fence(__ATOMIC_ACQUIRE);
-		/* Re-read the table change counter to check if the
-		 * table has changed during search. If yes, re-do
-		 * the search.
-		 * This load should not get hoisted. The load
-		 * acquires on cnt_b, primary key index and secondary
-		 * key index will make sure that it does not get
-		 * hoisted.
-		 */
-		cnt_a = __atomic_load_n(h->tbl_chng_cnt,
-					__ATOMIC_ACQUIRE);
-	} while (cnt_b != cnt_a);
+next_key:
+		continue;
+	}
 
 	/* all found, do not need to go through ext bkt */
 	if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {