
[v2,1/4] hash: prepare for deprecation of flags

Message ID 20181101232522.702-2-honnappa.nagarahalli@arm.com
State New
Series hash: deprecate lock elision and read/write concurrency flags

Commit Message

Honnappa Nagarahalli Nov. 1, 2018, 11:25 p.m. UTC
Lock elision and read/write concurrency flags need to be deprecated.
Create the new version of the function and fix checkpatch issues.

Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

Reviewed-by: Dharmik Thakkar <dharmik.thakkar@arm.com>

Reviewed-by: Gavin Hu <gavin.hu@arm.com>

---
 lib/librte_hash/rte_cuckoo_hash.c | 329 ++++++++++++++++++++++++++++++
 lib/librte_hash/rte_hash.h        |   2 +
 2 files changed, 331 insertions(+)

-- 
2.17.1
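
For context, the extra_flag bits this series deprecates are plain bitmask
options on struct rte_hash_parameters, validated by the copied function in
the diff below. A minimal caller sketch, illustrative only: the table name
and sizes are placeholders, and it assumes DPDK 18.11 headers where
RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF is available:

#include <rte_hash.h>
#include <rte_jhash.h>

static struct rte_hash *
example_create(void)
{
	struct rte_hash_parameters params = {
		.name = "example_ht",	/* placeholder name */
		.entries = 1024,
		.key_len = 16,
		.hash_func = rte_jhash,
		.hash_func_init_val = 0,
		.socket_id = 0,		/* rte_socket_id() in real code */
		/* RW_CONCURRENCY and RW_CONCURRENCY_LF are mutually
		 * exclusive; the copy below rejects the pair with EINVAL.
		 */
		.extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
	};

	return rte_hash_create(&params);
}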

Comments

Bruce Richardson Nov. 2, 2018, 11:14 a.m. UTC | #1
On Thu, Nov 01, 2018 at 06:25:19PM -0500, Honnappa Nagarahalli wrote:
> Lock ellision and read/write concurreny flags need to be deprecated.

typo: concurrency.

> Create the new version of the function and fix checkpatch issues.

> 

Maybe emphasise that the new version is identical to the old, in case this
patch does not get squashed into next.
"Create a copy of the hash_create function, identical to the original
except for fixing checkpatch issues. Later patches will update this copy"

> Signed-off-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>

> Reviewed-by: Dharmik Thakkar <dharmik.thakkar@arm.com>

> Reviewed-by: Gavin Hu <gavin.hu@arm.com>

> ---
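
The _v1811 suffix follows DPDK's symbol versioning convention, so a later
patch in the series presumably binds the copy as the default symbol via the
rte_compat.h macros, plus a matching DPDK_18.11 node in the library's .map
file. A sketch of the usual pattern; the _v1808 rename and the version
numbers here are assumptions, not part of this patch:

#include <rte_compat.h>

/* Hypothetical wiring: assumes a follow-up patch renames the original
 * function body to rte_hash_create_v1808.
 */

/* Binaries linked against older DPDK keep resolving to the old body. */
VERSION_SYMBOL(rte_hash_create, _v1808, 18.08);

/* Newly linked binaries resolve rte_hash_create to the 18.11 copy. */
BIND_DEFAULT_SYMBOL(rte_hash_create, _v1811, 18.11);
MAP_STATIC_SYMBOL(struct rte_hash *rte_hash_create(
		const struct rte_hash_parameters *params),
		rte_hash_create_v1811);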

Patch

diff --git a/lib/librte_hash/rte_cuckoo_hash.c b/lib/librte_hash/rte_cuckoo_hash.c
index 5ddcccd87..ec3b519ba 100644
--- a/lib/librte_hash/rte_cuckoo_hash.c
+++ b/lib/librte_hash/rte_cuckoo_hash.c
@@ -447,6 +447,335 @@  rte_hash_create(const struct rte_hash_parameters *params)
 	return NULL;
 }
 
+struct rte_hash *
+rte_hash_create_v1811(const struct rte_hash_parameters *params)
+{
+	struct rte_hash *h = NULL;
+	struct rte_tailq_entry *te = NULL;
+	struct rte_hash_list *hash_list;
+	struct rte_ring *r = NULL;
+	struct rte_ring *r_ext = NULL;
+	char hash_name[RTE_HASH_NAMESIZE];
+	void *k = NULL;
+	void *buckets = NULL;
+	void *buckets_ext = NULL;
+	char ring_name[RTE_RING_NAMESIZE];
+	char ext_ring_name[RTE_RING_NAMESIZE];
+	unsigned int num_key_slots;
+	unsigned int i;
+	unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
+	unsigned int ext_table_support = 0;
+	unsigned int readwrite_concur_support = 0;
+	unsigned int writer_takes_lock = 0;
+	unsigned int no_free_on_del = 0;
+	uint32_t *tbl_chng_cnt = NULL;
+	unsigned int readwrite_concur_lf_support = 0;
+
+	rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
+
+	hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
+
+	if (params == NULL) {
+		RTE_LOG(ERR, HASH, "%s: no parameters\n", __func__);
+		return NULL;
+	}
+
+	/* Check for valid parameters */
+	if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
+			(params->entries < RTE_HASH_BUCKET_ENTRIES) ||
+			(params->key_len == 0)) {
+		rte_errno = EINVAL;
+		RTE_LOG(ERR, HASH, "%s: invalid parameters\n", __func__);
+		return NULL;
+	}
+
+	/* Validate correct usage of extra options */
+	if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
+	    (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
+		rte_errno = EINVAL;
+		RTE_LOG(ERR, HASH,
+			"%s: choose rw concurrency or rw concurrency lock free\n", __func__);
+		return NULL;
+	}
+
+	if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) &&
+	    (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)) {
+		rte_errno = EINVAL;
+		RTE_LOG(ERR, HASH, "%s: extendable bucket feature not supported with rw concurrency lock free\n", __func__);
+		return NULL;
+	}
+
+	/* Check extra flags field to check extra options. */
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
+		hw_trans_mem_support = 1;
+
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
+		use_local_cache = 1;
+		writer_takes_lock = 1;
+	}
+
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
+		readwrite_concur_support = 1;
+		writer_takes_lock = 1;
+	}
+
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
+		ext_table_support = 1;
+
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
+		no_free_on_del = 1;
+
+	if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
+		readwrite_concur_lf_support = 1;
+		/* Enable not freeing internal memory/index on delete */
+		no_free_on_del = 1;
+	}
+
+	/* Store all keys and leave the first entry as a dummy entry
+	 * for lookup_bulk.
+	 */
+	if (use_local_cache)
+		/*
+		 * Increase number of slots by total number of indices
+		 * that can be stored in the lcore caches
+		 * except for the first cache
+		 */
+		num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
+					(LCORE_CACHE_SIZE - 1) + 1;
+	else
+		num_key_slots = params->entries + 1;
+
+	snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
+	/* Create ring (Dummy slot index is not enqueued) */
+	r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
+			params->socket_id, 0);
+	if (r == NULL) {
+		RTE_LOG(ERR, HASH, "memory allocation failed\n");
+		goto err;
+	}
+
+	const uint32_t num_buckets = rte_align32pow2(params->entries) /
+						RTE_HASH_BUCKET_ENTRIES;
+
+	/* Create ring for extendable buckets. */
+	if (ext_table_support) {
+		snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
+								params->name);
+		r_ext = rte_ring_create(ext_ring_name,
+				rte_align32pow2(num_buckets + 1),
+				params->socket_id, 0);
+
+		if (r_ext == NULL) {
+			RTE_LOG(ERR, HASH,
+				"ext buckets memory allocation failed\n");
+			goto err;
+		}
+	}
+
+	snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
+
+	rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
+
+	/* Guarantee there's no existing entry with the same name; this is
+	 * normally already checked by the ring creation above.
+	 */
+	TAILQ_FOREACH(te, hash_list, next) {
+		h = (struct rte_hash *) te->data;
+		if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
+			break;
+	}
+	h = NULL;
+	if (te != NULL) {
+		rte_errno = EEXIST;
+		te = NULL;
+		goto err_unlock;
+	}
+
+	te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
+	if (te == NULL) {
+		RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
+		goto err_unlock;
+	}
+
+	h = (struct rte_hash *)rte_zmalloc_socket(hash_name,
+					sizeof(struct rte_hash),
+					RTE_CACHE_LINE_SIZE, params->socket_id);
+
+	if (h == NULL) {
+		RTE_LOG(ERR, HASH, "memory allocation failed\n");
+		goto err_unlock;
+	}
+
+	buckets = rte_zmalloc_socket(NULL,
+				num_buckets * sizeof(struct rte_hash_bucket),
+				RTE_CACHE_LINE_SIZE, params->socket_id);
+
+	if (buckets == NULL) {
+		RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
+		goto err_unlock;
+	}
+
+	/* Allocate same number of extendable buckets */
+	if (ext_table_support) {
+		buckets_ext = rte_zmalloc_socket(NULL,
+				num_buckets * sizeof(struct rte_hash_bucket),
+				RTE_CACHE_LINE_SIZE, params->socket_id);
+		if (buckets_ext == NULL) {
+			RTE_LOG(ERR, HASH,
+				"ext buckets memory allocation failed\n");
+			goto err_unlock;
+		}
+		/* Populate ext bkt ring. We reserve index 0, similar to the
+		 * key-data slot, in case we later want to use the bucket
+		 * index for the linked list, where 0 would mean NULL for
+		 * the next bucket.
+		 */
+		for (i = 1; i <= num_buckets; i++)
+			rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
+	}
+
+	const uint32_t key_entry_size =
+		RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
+			  KEY_ALIGNMENT);
+	const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
+
+	k = rte_zmalloc_socket(NULL, key_tbl_size,
+			RTE_CACHE_LINE_SIZE, params->socket_id);
+
+	if (k == NULL) {
+		RTE_LOG(ERR, HASH, "memory allocation failed\n");
+		goto err_unlock;
+	}
+
+	tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
+			RTE_CACHE_LINE_SIZE, params->socket_id);
+
+	if (tbl_chng_cnt == NULL) {
+		RTE_LOG(ERR, HASH, "memory allocation failed\n");
+		goto err_unlock;
+	}
+
+/*
+ * On x86 or ARM64, select an optimized key compare function, which may
+ * use architecture-specific intrinsics; otherwise use memcmp.
+ */
+#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
+	/* Select function to compare keys */
+	switch (params->key_len) {
+	case 16:
+		h->cmp_jump_table_idx = KEY_16_BYTES;
+		break;
+	case 32:
+		h->cmp_jump_table_idx = KEY_32_BYTES;
+		break;
+	case 48:
+		h->cmp_jump_table_idx = KEY_48_BYTES;
+		break;
+	case 64:
+		h->cmp_jump_table_idx = KEY_64_BYTES;
+		break;
+	case 80:
+		h->cmp_jump_table_idx = KEY_80_BYTES;
+		break;
+	case 96:
+		h->cmp_jump_table_idx = KEY_96_BYTES;
+		break;
+	case 112:
+		h->cmp_jump_table_idx = KEY_112_BYTES;
+		break;
+	case 128:
+		h->cmp_jump_table_idx = KEY_128_BYTES;
+		break;
+	default:
+		/* If key is not multiple of 16, use generic memcmp */
+		h->cmp_jump_table_idx = KEY_OTHER_BYTES;
+	}
+#else
+	h->cmp_jump_table_idx = KEY_OTHER_BYTES;
+#endif
+
+	if (use_local_cache) {
+		h->local_free_slots = rte_zmalloc_socket(NULL,
+				sizeof(struct lcore_cache) * RTE_MAX_LCORE,
+				RTE_CACHE_LINE_SIZE, params->socket_id);
+	}
+
+	/* Default hash function */
+#if defined(RTE_ARCH_X86)
+	default_hash_func = (rte_hash_function)rte_hash_crc;
+#elif defined(RTE_ARCH_ARM64)
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
+		default_hash_func = (rte_hash_function)rte_hash_crc;
+#endif
+	/* Setup hash context */
+	snprintf(h->name, sizeof(h->name), "%s", params->name);
+	h->entries = params->entries;
+	h->key_len = params->key_len;
+	h->key_entry_size = key_entry_size;
+	h->hash_func_init_val = params->hash_func_init_val;
+
+	h->num_buckets = num_buckets;
+	h->bucket_bitmask = h->num_buckets - 1;
+	h->buckets = buckets;
+	h->buckets_ext = buckets_ext;
+	h->free_ext_bkts = r_ext;
+	h->hash_func = (params->hash_func == NULL) ?
+		default_hash_func : params->hash_func;
+	h->key_store = k;
+	h->free_slots = r;
+	h->tbl_chng_cnt = tbl_chng_cnt;
+	*h->tbl_chng_cnt = 0;
+	h->hw_trans_mem_support = hw_trans_mem_support;
+	h->use_local_cache = use_local_cache;
+	h->readwrite_concur_support = readwrite_concur_support;
+	h->ext_table_support = ext_table_support;
+	h->writer_takes_lock = writer_takes_lock;
+	h->no_free_on_del = no_free_on_del;
+	h->readwrite_concur_lf_support = readwrite_concur_lf_support;
+
+#if defined(RTE_ARCH_X86)
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
+		h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
+	else
+#endif
+		h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
+
+	/* Writer threads need to take the lock when:
+	 * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
+	 * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
+	 */
+	if (h->writer_takes_lock) {
+		h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
+						RTE_CACHE_LINE_SIZE);
+		if (h->readwrite_lock == NULL)
+			goto err_unlock;
+
+		rte_rwlock_init(h->readwrite_lock);
+	}
+
+	/* Populate free slots ring. Entry zero is reserved for key misses. */
+	for (i = 1; i < num_key_slots; i++)
+		rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
+
+	te->data = (void *) h;
+	TAILQ_INSERT_TAIL(hash_list, te, next);
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+
+	return h;
+err_unlock:
+	rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
+err:
+	rte_ring_free(r);
+	rte_ring_free(r_ext);
+	rte_free(te);
+	rte_free(h);
+	rte_free(buckets);
+	rte_free(buckets_ext);
+	rte_free(k);
+	rte_free(tbl_chng_cnt);
+	return NULL;
+}
+
 void
 rte_hash_free(struct rte_hash *h)
 {
diff --git a/lib/librte_hash/rte_hash.h b/lib/librte_hash/rte_hash.h
index c93d1a137..f049b9dd0 100644
--- a/lib/librte_hash/rte_hash.h
+++ b/lib/librte_hash/rte_hash.h
@@ -105,6 +105,8 @@  struct rte_hash;
  */
 struct rte_hash *
 rte_hash_create(const struct rte_hash_parameters *params);
+struct rte_hash *
+rte_hash_create_v1811(const struct rte_hash_parameters *params);
 
 /**
  * Set a new hash compare function other than the default one.
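
For completeness, a hedged sketch of caller-side error handling against the
rte_errno values set in the copied function: EINVAL for bad entries/key_len
or conflicting extra_flag bits, EEXIST for a duplicate table name. The
helper name is hypothetical:

#include <errno.h>
#include <stdio.h>
#include <rte_errno.h>
#include <rte_hash.h>

/* Illustrative: map rte_errno back to the failure causes seen above. */
static struct rte_hash *
create_or_report(const struct rte_hash_parameters *params)
{
	struct rte_hash *h = rte_hash_create_v1811(params);

	if (h == NULL) {
		if (rte_errno == EEXIST)
			printf("hash '%s' already exists\n", params->name);
		else if (rte_errno == EINVAL)
			printf("bad sizes or conflicting extra_flag bits\n");
	}
	return h;
}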